Posted to commits@couchdb.apache.org by va...@apache.org on 2021/04/13 17:00:40 UTC

[couchdb] branch remove-a-few-3.x-applications created (now 180c3da)

This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a change to branch remove-a-few-3.x-applications
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


      at 180c3da  [wip] 3.x applications cleanup

This branch includes the following new commits:

     new 180c3da  [wip] 3.x applications cleanup

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


[couchdb] 01/01: [wip] 3.x applications cleanup

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch remove-a-few-3.x-applications
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 180c3da72432ef4f9894b5a960d69ed44605ad92
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Tue Apr 13 12:57:57 2021 -0400

    [wip] 3.x applications cleanup
    
    WIP
    
     TODO: need to break it into smaller commits
    
    Summary:
      - removed about a dozen applications
      - all eunit tests now run, including the ones from couch
      - cleaned up default.ini
      - closed the node-local 5986 port
      - removed endpoints now return 501 Not Implemented or 410 Gone (not supported)
---
 Makefile                                           |    6 +-
 Makefile.win                                       |    2 +-
 dev/run                                            |    2 +-
 emilio.config                                      |    4 +-
 mix.exs                                            |    5 +-
 rebar.config.script                                |   14 -
 rel/apps/couch_epi.config                          |    7 +-
 rel/overlay/etc/default.ini                        |  216 +-
 rel/overlay/etc/local.ini                          |   13 -
 rel/reltool.config                                 |   30 -
 src/chttpd/src/chttpd.erl                          |    7 +-
 src/chttpd/src/chttpd_changes.erl                  |    6 +-
 src/chttpd/src/chttpd_db.erl                       |   12 +-
 src/chttpd/src/chttpd_httpd_handlers.erl           |   23 +-
 src/chttpd/src/chttpd_misc.erl                     |   49 +-
 src/chttpd/src/chttpd_node.erl                     |    1 -
 src/chttpd/src/chttpd_rewrite.erl                  |  487 -----
 src/chttpd/src/chttpd_show.erl                     |  165 +-
 src/chttpd/src/chttpd_util.erl                     |   41 +
 src/chttpd/src/chttpd_view.erl                     |   24 +-
 src/couch/include/couch_db.hrl                     |   44 -
 .../priv/couch_ejson_compare/couch_ejson_compare.c |    2 +-
 src/couch/src/couch.app.src                        |   41 +-
 src/couch/src/couch_att.erl                        |  189 --
 src/couch/src/couch_bt_engine.erl                  | 1246 ------------
 src/couch/src/couch_bt_engine.hrl                  |   27 -
 src/couch/src/couch_bt_engine_compactor.erl        |  590 ------
 src/couch/src/couch_bt_engine_header.erl           |  451 -----
 src/couch/src/couch_bt_engine_stream.erl           |   70 -
 src/couch/src/couch_btree.erl                      |  855 --------
 src/couch/src/couch_changes.erl                    |  724 -------
 src/couch/src/couch_compress.erl                   |   99 -
 src/couch/src/couch_db.erl                         | 2086 --------------------
 src/couch/src/couch_db_engine.erl                  | 1105 -----------
 src/couch/src/couch_db_epi.erl                     |    1 -
 src/couch/src/couch_db_header.erl                  |  405 ----
 src/couch/src/couch_db_int.hrl                     |   76 -
 src/couch/src/couch_db_plugin.erl                  |   96 -
 src/couch/src/couch_db_split.erl                   |  503 -----
 src/couch/src/couch_db_updater.erl                 |  955 ---------
 src/couch/src/couch_debug.erl                      |   38 -
 src/couch/src/couch_doc.erl                        |   59 +-
 src/couch/src/couch_emsort.erl                     |  318 ---
 src/couch/src/couch_event_sup.erl                  |   74 -
 src/couch/src/couch_file.erl                       |  804 --------
 src/couch/src/couch_flags.erl                      |   16 +-
 src/couch/src/couch_httpd.erl                      |  109 +-
 src/couch/src/couch_httpd_db.erl                   | 1263 ------------
 src/couch/src/couch_httpd_misc_handlers.erl        |  269 ---
 src/couch/src/couch_httpd_rewrite.erl              |  484 -----
 src/couch/src/couch_lru.erl                        |   67 -
 src/couch/src/couch_multidb_changes.erl            |  903 ---------
 src/couch/src/couch_os_process.erl                 |    2 +-
 src/couch/src/couch_partition.erl                  |    2 +-
 src/couch/src/couch_primary_sup.erl                |    6 -
 src/couch/src/couch_query_servers.erl              |    3 +
 src/couch/src/couch_secondary_sup.erl              |   13 +-
 src/couch/src/couch_server.erl                     |  872 +-------
 src/couch/src/couch_server_int.hrl                 |   23 -
 src/couch/src/couch_stream.erl                     |  322 ---
 src/couch/src/couch_task_status.erl                |  171 --
 src/couch/src/couch_util.erl                       |    4 +-
 src/couch/src/test_util.erl                        |   42 +-
 src/couch/test/eunit/chttpd_endpoints_tests.erl    |   18 +-
 src/couch/test/eunit/couch_auth_cache_tests.erl    |  349 ----
 .../test/eunit/couch_bt_engine_compactor_tests.erl |  129 --
 src/couch/test/eunit/couch_bt_engine_tests.erl     |   20 -
 .../test/eunit/couch_bt_engine_upgrade_tests.erl   |  244 ---
 src/couch/test/eunit/couch_btree_tests.erl         |  572 ------
 src/couch/test/eunit/couch_changes_tests.erl       |  962 ---------
 src/couch/test/eunit/couch_db_doc_tests.erl        |  121 --
 src/couch/test/eunit/couch_db_mpr_tests.erl        |   12 +-
 src/couch/test/eunit/couch_db_plugin_tests.erl     |  205 --
 .../test/eunit/couch_db_props_upgrade_tests.erl    |   83 -
 src/couch/test/eunit/couch_db_split_tests.erl      |  331 ----
 src/couch/test/eunit/couch_db_tests.erl            |  198 --
 src/couch/test/eunit/couch_doc_json_tests.erl      |   82 +-
 src/couch/test/eunit/couch_doc_tests.erl           |   45 +-
 src/couch/test/eunit/couch_file_tests.erl          |  551 ------
 src/couch/test/eunit/couch_index_tests.erl         |  232 ---
 src/couch/test/eunit/couch_query_servers_tests.erl |    2 +-
 src/couch/test/eunit/couch_server_tests.erl        |  294 ---
 src/couch/test/eunit/couch_stream_tests.erl        |  124 --
 src/couch/test/eunit/couch_task_status_tests.erl   |  233 ---
 src/couch/test/eunit/couchdb_attachments_tests.erl |  765 -------
 src/couch/test/eunit/couchdb_auth_tests.erl        |   11 +-
 src/couch/test/eunit/couchdb_cors_tests.erl        |    9 +-
 src/couch/test/eunit/couchdb_db_tests.erl          |   91 -
 src/couch/test/eunit/couchdb_design_doc_tests.erl  |   87 -
 .../test/eunit/couchdb_file_compression_tests.erl  |  250 ---
 .../test/eunit/couchdb_location_header_tests.erl   |   78 -
 src/couch/test/eunit/couchdb_mrview_cors_tests.erl |   18 +-
 src/couch/test/eunit/couchdb_mrview_tests.erl      |  261 ---
 .../test/eunit/couchdb_update_conflicts_tests.erl  |  280 ---
 src/couch/test/eunit/couchdb_vhosts_tests.erl      |  271 ---
 src/couch/test/eunit/couchdb_views_tests.erl       |  668 -------
 .../test/eunit/fixtures/os_daemon_configer.escript |    3 +-
 src/couch/test/eunit/global_changes_tests.erl      |  159 --
 src/couch/test/exunit/couch_compress_tests.exs     |  113 --
 src/couch/test/exunit/fabric_test.exs              |  101 -
 src/couch_event/.gitignore                         |    2 -
 src/couch_event/LICENSE                            |  202 --
 src/couch_event/README.md                          |    3 -
 src/couch_event/rebar.config                       |    1 -
 src/couch_event/src/couch_event.app.src            |   22 -
 src/couch_event/src/couch_event.erl                |   65 -
 src/couch_event/src/couch_event_app.erl            |   27 -
 src/couch_event/src/couch_event_int.hrl            |   19 -
 src/couch_event/src/couch_event_listener.erl       |  238 ---
 src/couch_event/src/couch_event_listener_mfa.erl   |  107 -
 src/couch_event/src/couch_event_os_listener.erl    |   76 -
 src/couch_event/src/couch_event_server.erl         |  156 --
 src/couch_event/src/couch_event_sup2.erl           |   44 -
 src/couch_index/.gitignore                         |    3 -
 src/couch_index/LICENSE                            |  202 --
 src/couch_index/rebar.config                       |    2 -
 src/couch_index/src/couch_index.app.src            |   19 -
 src/couch_index/src/couch_index.erl                |  639 ------
 src/couch_index/src/couch_index_app.erl            |   21 -
 src/couch_index/src/couch_index_compactor.erl      |  135 --
 src/couch_index/src/couch_index_epi.erl            |   50 -
 src/couch_index/src/couch_index_plugin.erl         |   51 -
 .../src/couch_index_plugin_couch_db.erl            |   26 -
 src/couch_index/src/couch_index_server.erl         |  303 ---
 src/couch_index/src/couch_index_sup.erl            |   24 -
 src/couch_index/src/couch_index_updater.erl        |  239 ---
 src/couch_index/src/couch_index_util.erl           |   78 -
 .../test/eunit/couch_index_compaction_tests.erl    |  117 --
 .../test/eunit/couch_index_ddoc_updated_tests.erl  |  145 --
 src/couch_js/src/couch_js.app.src                  |    3 +-
 src/couch_js/src/couch_js_os_process.erl           |    2 +-
 src/couch_mrview/LICENSE                           |  202 --
 src/couch_mrview/include/couch_mrview.hrl          |  114 --
 src/couch_mrview/priv/stats_descriptions.cfg       |   24 -
 src/couch_mrview/rebar.config                      |    2 -
 src/couch_mrview/src/couch_mrview.app.src          |   18 -
 src/couch_mrview/src/couch_mrview.erl              |  692 -------
 src/couch_mrview/src/couch_mrview_cleanup.erl      |   59 -
 src/couch_mrview/src/couch_mrview_compactor.erl    |  294 ---
 src/couch_mrview/src/couch_mrview_index.erl        |  329 ---
 src/couch_mrview/src/couch_mrview_show.erl         |  468 -----
 src/couch_mrview/src/couch_mrview_test_util.erl    |  123 --
 .../src/couch_mrview_update_notifier.erl           |   49 -
 src/couch_mrview/src/couch_mrview_updater.erl      |  373 ----
 src/couch_mrview/src/couch_mrview_util.erl         | 1180 -----------
 .../test/eunit/couch_mrview_all_docs_tests.erl     |  140 --
 .../test/eunit/couch_mrview_collation_tests.erl    |  207 --
 .../test/eunit/couch_mrview_compact_tests.erl      |  115 --
 .../test/eunit/couch_mrview_ddoc_updated_tests.erl |  145 --
 .../eunit/couch_mrview_ddoc_validation_tests.erl   |  422 ----
 .../test/eunit/couch_mrview_design_docs_tests.erl  |  136 --
 .../test/eunit/couch_mrview_http_tests.erl         |   28 -
 .../test/eunit/couch_mrview_index_info_tests.erl   |  111 --
 .../test/eunit/couch_mrview_local_docs_tests.erl   |  148 --
 .../test/eunit/couch_mrview_map_views_tests.erl    |  144 --
 .../eunit/couch_mrview_purge_docs_fabric_tests.erl |  286 ---
 .../test/eunit/couch_mrview_purge_docs_tests.erl   |  575 ------
 .../test/eunit/couch_mrview_red_views_tests.erl    |   95 -
 .../test/eunit/couch_mrview_util_tests.erl         |   39 -
 src/couch_peruser/.gitignore                       |    9 -
 src/couch_peruser/LICENSE                          |  202 --
 src/couch_peruser/README.md                        |   34 -
 src/couch_peruser/src/couch_peruser.app.src        |   20 -
 src/couch_peruser/src/couch_peruser.erl            |  423 ----
 src/couch_peruser/src/couch_peruser_app.erl        |   26 -
 src/couch_peruser/src/couch_peruser_sup.erl        |   29 -
 .../test/eunit/couch_peruser_test.erl              |  538 -----
 src/couch_plugins/src/couch_plugins_httpd.erl      |    4 +-
 src/couch_pse_tests/src/couch_pse_tests.app.src    |   20 -
 src/couch_pse_tests/src/cpse_gather.erl            |   95 -
 src/couch_pse_tests/src/cpse_test_attachments.erl  |   99 -
 src/couch_pse_tests/src/cpse_test_compaction.erl   |  318 ---
 .../src/cpse_test_copy_purge_infos.erl             |   82 -
 src/couch_pse_tests/src/cpse_test_fold_changes.erl |  185 --
 src/couch_pse_tests/src/cpse_test_fold_docs.erl    |  400 ----
 .../src/cpse_test_fold_purge_infos.erl             |  167 --
 .../src/cpse_test_get_set_props.erl                |   95 -
 .../src/cpse_test_open_close_delete.erl            |   77 -
 .../src/cpse_test_purge_bad_checkpoints.erl        |   80 -
 src/couch_pse_tests/src/cpse_test_purge_docs.erl   |  464 -----
 .../src/cpse_test_purge_replication.erl            |  215 --
 src/couch_pse_tests/src/cpse_test_purge_seqs.erl   |  129 --
 .../src/cpse_test_read_write_docs.erl              |  311 ---
 src/couch_pse_tests/src/cpse_test_ref_counting.erl |  113 --
 src/couch_pse_tests/src/cpse_util.erl              |  677 -------
 .../src/couch_replicator_api_wrap.erl              |    6 +-
 src/couch_replicator/src/couch_replicator_ids.erl  |    2 +-
 src/couch_views/include/couch_views.hrl            |   94 +
 src/couch_views/src/couch_views.erl                |    7 +-
 src/couch_views/src/couch_views_batch.erl          |    2 +-
 src/couch_views/src/couch_views_batch_impl.erl     |    2 +-
 src/couch_views/src/couch_views_fdb.erl            |    1 -
 src/couch_views/src/couch_views_http.erl           |   32 +-
 .../src/couch_views_http_util.erl}                 |  308 +--
 src/couch_views/src/couch_views_indexer.erl        |    1 -
 src/couch_views/src/couch_views_jobs.erl           |    1 -
 src/couch_views/src/couch_views_reader.erl         |    5 +-
 src/couch_views/src/couch_views_trees.erl          |    1 -
 src/couch_views/src/couch_views_updater.erl        |    6 +-
 src/couch_views/src/couch_views_util.erl           |  105 +-
 src/couch_views/src/couch_views_validate.erl       |  466 +++++
 src/couch_views/test/couch_views_batch_test.erl    |    2 +-
 src/couch_views/test/couch_views_cleanup_test.erl  |    1 -
 .../test/couch_views_custom_red_test.erl           |    1 -
 src/couch_views/test/couch_views_indexer_test.erl  |    1 -
 src/couch_views/test/couch_views_info_test.erl     |    2 +-
 src/couch_views/test/couch_views_map_test.erl      |   22 -
 src/couch_views/test/couch_views_size_test.erl     |    3 +-
 .../test/couch_views_trace_index_test.erl          |    2 +-
 src/couch_views/test/couch_views_updater_test.erl  |    3 +-
 src/couch_views/test/couch_views_upgrade_test.erl  |    3 +-
 src/ddoc_cache/LICENSE                             |  202 --
 src/ddoc_cache/README.md                           |    4 -
 src/ddoc_cache/priv/stats_descriptions.cfg         |   12 -
 src/ddoc_cache/src/ddoc_cache.app.src              |   32 -
 src/ddoc_cache/src/ddoc_cache.erl                  |   60 -
 src/ddoc_cache/src/ddoc_cache.hrl                  |   40 -
 src/ddoc_cache/src/ddoc_cache_app.erl              |   25 -
 src/ddoc_cache/src/ddoc_cache_entry.erl            |  374 ----
 src/ddoc_cache/src/ddoc_cache_entry_custom.erl     |   37 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     |   46 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl |   47 -
 .../src/ddoc_cache_entry_validation_funs.erl       |   44 -
 src/ddoc_cache/src/ddoc_cache_lru.erl              |  333 ----
 src/ddoc_cache/src/ddoc_cache_opener.erl           |   66 -
 src/ddoc_cache/src/ddoc_cache_sup.erl              |   46 -
 src/ddoc_cache/src/ddoc_cache_value.erl            |   27 -
 .../test/eunit/ddoc_cache_basic_test.erl           |  175 --
 .../test/eunit/ddoc_cache_coverage_test.erl        |   77 -
 .../test/eunit/ddoc_cache_disabled_test.erl        |   62 -
 .../test/eunit/ddoc_cache_entry_test.erl           |  159 --
 src/ddoc_cache/test/eunit/ddoc_cache_ev.erl        |   21 -
 .../test/eunit/ddoc_cache_eviction_test.erl        |   96 -
 src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl  |  219 --
 .../test/eunit/ddoc_cache_no_cache_test.erl        |   87 -
 .../test/eunit/ddoc_cache_open_error_test.erl      |   46 -
 src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl |  107 -
 .../test/eunit/ddoc_cache_opener_test.erl          |   33 -
 .../test/eunit/ddoc_cache_refresh_test.erl         |  174 --
 .../test/eunit/ddoc_cache_remove_test.erl          |  224 ---
 src/ddoc_cache/test/eunit/ddoc_cache_test.hrl      |   26 -
 src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl     |  111 --
 src/dreyfus/.gitignore                             |    4 -
 src/dreyfus/LICENSE.txt                            |  202 --
 src/dreyfus/README.md                              |   78 -
 src/dreyfus/include/dreyfus.hrl                    |   74 -
 src/dreyfus/priv/stats_descriptions.cfg            |   65 -
 src/dreyfus/src/clouseau_rpc.erl                   |  109 -
 src/dreyfus/src/dreyfus.app.src                    |   22 -
 src/dreyfus/src/dreyfus_app.erl                    |   24 -
 src/dreyfus/src/dreyfus_bookmark.erl               |   90 -
 src/dreyfus/src/dreyfus_config.erl                 |   15 -
 src/dreyfus/src/dreyfus_epi.erl                    |   46 -
 src/dreyfus/src/dreyfus_fabric.erl                 |  205 --
 src/dreyfus/src/dreyfus_fabric_cleanup.erl         |   78 -
 src/dreyfus/src/dreyfus_fabric_group1.erl          |  129 --
 src/dreyfus/src/dreyfus_fabric_group2.erl          |  158 --
 src/dreyfus/src/dreyfus_fabric_info.erl            |  108 -
 src/dreyfus/src/dreyfus_fabric_search.erl          |  270 ---
 src/dreyfus/src/dreyfus_httpd.erl                  |  614 ------
 src/dreyfus/src/dreyfus_httpd_handlers.erl         |   29 -
 src/dreyfus/src/dreyfus_index.erl                  |  391 ----
 src/dreyfus/src/dreyfus_index_manager.erl          |  153 --
 src/dreyfus/src/dreyfus_index_updater.erl          |  181 --
 src/dreyfus/src/dreyfus_plugin_couch_db.erl        |   26 -
 src/dreyfus/src/dreyfus_rpc.erl                    |  130 --
 src/dreyfus/src/dreyfus_sup.erl                    |   32 -
 src/dreyfus/src/dreyfus_util.erl                   |  441 -----
 src/dreyfus/test/dreyfus_blacklist_await_test.erl  |   76 -
 .../test/dreyfus_blacklist_request_test.erl        |   96 -
 src/dreyfus/test/dreyfus_config_test.erl           |   71 -
 src/dreyfus/test/dreyfus_purge_test.erl            |  867 --------
 src/dreyfus/test/dreyfus_test_util.erl             |   13 -
 src/dreyfus/test/elixir/mix.exs                    |   30 -
 src/dreyfus/test/elixir/mix.lock                   |    5 -
 src/dreyfus/test/elixir/run                        |    4 -
 .../test/elixir/test/partition_search_test.exs     |  247 ---
 src/dreyfus/test/elixir/test/search_test.exs       |  226 ---
 src/dreyfus/test/elixir/test/test_helper.exs       |    4 -
 src/fabric/include/fabric.hrl                      |   46 -
 src/fabric/src/fabric.app.src                      |    2 -
 src/fabric/src/fabric.erl                          |  720 -------
 src/fabric/src/fabric2_db.erl                      |   26 +-
 src/fabric/src/fabric2_util.erl                    |    2 +-
 src/fabric/src/fabric_db_create.erl                |  228 ---
 src/fabric/src/fabric_db_delete.erl                |   98 -
 src/fabric/src/fabric_db_doc_count.erl             |   62 -
 src/fabric/src/fabric_db_info.erl                  |  171 --
 src/fabric/src/fabric_db_meta.erl                  |  198 --
 src/fabric/src/fabric_db_partition_info.erl        |  155 --
 src/fabric/src/fabric_db_update_listener.erl       |  177 --
 src/fabric/src/fabric_design_doc_count.erl         |   62 -
 src/fabric/src/fabric_dict.erl                     |   61 -
 src/fabric/src/fabric_doc_attachments.erl          |  160 --
 src/fabric/src/fabric_doc_atts.erl                 |  170 --
 src/fabric/src/fabric_doc_missing_revs.erl         |   97 -
 src/fabric/src/fabric_doc_open.erl                 |  610 ------
 src/fabric/src/fabric_doc_open_revs.erl            |  799 --------
 src/fabric/src/fabric_doc_purge.erl                |  571 ------
 src/fabric/src/fabric_doc_update.erl               |  377 ----
 src/fabric/src/fabric_group_info.erl               |  139 --
 src/fabric/src/fabric_ring.erl                     |  519 -----
 src/fabric/src/fabric_rpc.erl                      |  664 -------
 src/fabric/src/fabric_streams.erl                  |  274 ---
 src/fabric/src/fabric_util.erl                     |  347 ----
 src/fabric/src/fabric_view.erl                     |  478 -----
 src/fabric/src/fabric_view_all_docs.erl            |  332 ----
 src/fabric/src/fabric_view_changes.erl             |  820 --------
 src/fabric/src/fabric_view_map.erl                 |  267 ---
 src/fabric/src/fabric_view_reduce.erl              |  165 --
 src/fabric/test/eunit/fabric_rpc_tests.erl         |  181 --
 src/fabric/test/fabric2_dir_prefix_tests.erl       |    4 +-
 src/fabric/test/fabric2_node_types_tests.erl       |    4 +-
 src/fabric/test/fabric2_tx_options_tests.erl       |    4 +-
 src/global_changes/.gitignore                      |    2 -
 src/global_changes/LICENSE                         |  203 --
 src/global_changes/README.md                       |   27 -
 src/global_changes/priv/stats_descriptions.cfg     |   20 -
 src/global_changes/src/global_changes.app.src      |   32 -
 src/global_changes/src/global_changes_app.erl      |   28 -
 src/global_changes/src/global_changes_epi.erl      |   51 -
 src/global_changes/src/global_changes_httpd.erl    |  285 ---
 .../src/global_changes_httpd_handlers.erl          |   28 -
 src/global_changes/src/global_changes_listener.erl |  165 --
 src/global_changes/src/global_changes_plugin.erl   |   40 -
 src/global_changes/src/global_changes_server.erl   |  229 ---
 src/global_changes/src/global_changes_sup.erl      |   84 -
 src/global_changes/src/global_changes_util.erl     |   27 -
 .../test/eunit/global_changes_hooks_tests.erl      |  156 --
 src/ioq/.gitignore                                 |    2 -
 src/ioq/src/ioq.app.src                            |   21 -
 src/ioq/src/ioq.erl                                |  189 --
 src/ioq/src/ioq_app.erl                            |   21 -
 src/ioq/src/ioq_sup.erl                            |   24 -
 src/ken/README.md                                  |   12 -
 src/ken/rebar.config.script                        |   28 -
 src/ken/src/ken.app.src.script                     |   38 -
 src/ken/src/ken.erl                                |   29 -
 src/ken/src/ken_app.erl                            |   28 -
 src/ken/src/ken_event_handler.erl                  |   56 -
 src/ken/src/ken_server.erl                         |  579 ------
 src/ken/src/ken_sup.erl                            |   33 -
 src/ken/test/config.ini                            |    2 -
 src/ken/test/ken_server_test.erl                   |   97 -
 src/mango/src/mango_cursor.erl                     |    8 -
 src/mango/src/mango_cursor_special.erl             |    2 +-
 src/mango/src/mango_cursor_text.erl                |  334 ----
 src/mango/src/mango_cursor_view.erl                |    4 +-
 src/mango/src/mango_idx.erl                        |   19 +-
 src/mango/src/mango_idx_text.erl                   |  459 -----
 src/mango/src/mango_json_bookmark.erl              |    2 +-
 src/mem3/LICENSE                                   |  202 --
 src/mem3/README.md                                 |   43 -
 src/mem3/README_reshard.md                         |   93 -
 src/mem3/include/mem3.hrl                          |   59 -
 src/mem3/priv/stats_descriptions.cfg               |   12 -
 src/mem3/rebar.config.script                       |   22 -
 src/mem3/src/mem3.app.src                          |   40 -
 src/mem3/src/mem3.erl                              |  424 ----
 src/mem3/src/mem3_app.erl                          |   21 -
 src/mem3/src/mem3_cluster.erl                      |  161 --
 src/mem3/src/mem3_epi.erl                          |   51 -
 src/mem3/src/mem3_hash.erl                         |   73 -
 src/mem3/src/mem3_httpd.erl                        |   84 -
 src/mem3/src/mem3_httpd_handlers.erl               |   61 -
 src/mem3/src/mem3_nodes.erl                        |  155 --
 src/mem3/src/mem3_plugin_couch_db.erl              |   21 -
 src/mem3/src/mem3_rep.erl                          |  998 ----------
 src/mem3/src/mem3_reshard.erl                      |  913 ---------
 src/mem3/src/mem3_reshard.hrl                      |   74 -
 src/mem3/src/mem3_reshard_api.erl                  |  217 --
 src/mem3/src/mem3_reshard_dbdoc.erl                |  274 ---
 src/mem3/src/mem3_reshard_httpd.erl                |  317 ---
 src/mem3/src/mem3_reshard_index.erl                |  164 --
 src/mem3/src/mem3_reshard_job.erl                  |  716 -------
 src/mem3/src/mem3_reshard_job_sup.erl              |   55 -
 src/mem3/src/mem3_reshard_store.erl                |  286 ---
 src/mem3/src/mem3_reshard_sup.erl                  |   47 -
 src/mem3/src/mem3_reshard_validate.erl             |  126 --
 src/mem3/src/mem3_rpc.erl                          |  711 -------
 src/mem3/src/mem3_seeds.erl                        |  162 --
 src/mem3/src/mem3_shards.erl                       |  766 -------
 src/mem3/src/mem3_sup.erl                          |   40 -
 src/mem3/src/mem3_sync.erl                         |  323 ---
 src/mem3/src/mem3_sync_event.erl                   |   86 -
 src/mem3/src/mem3_sync_event_listener.erl          |  353 ----
 src/mem3/src/mem3_sync_nodes.erl                   |  115 --
 src/mem3/src/mem3_sync_security.erl                |  117 --
 src/mem3/src/mem3_util.erl                         |  650 ------
 src/mem3/test/eunit/mem3_cluster_test.erl          |  133 --
 src/mem3/test/eunit/mem3_hash_test.erl             |   23 -
 src/mem3/test/eunit/mem3_rep_test.erl              |  321 ---
 src/mem3/test/eunit/mem3_reshard_api_test.erl      |  847 --------
 .../test/eunit/mem3_reshard_changes_feed_test.erl  |  389 ----
 src/mem3/test/eunit/mem3_reshard_test.erl          |  834 --------
 src/mem3/test/eunit/mem3_ring_prop_tests.erl       |  151 --
 src/mem3/test/eunit/mem3_seeds_test.erl            |   69 -
 src/mem3/test/eunit/mem3_sync_security_test.erl    |   54 -
 src/mem3/test/eunit/mem3_util_test.erl             |  130 --
 src/rexi/README.md                                 |   23 -
 src/rexi/include/rexi.hrl                          |   20 -
 src/rexi/priv/stats_descriptions.cfg               |   24 -
 src/rexi/rebar.config                              |    2 -
 src/rexi/src/rexi.app.src                          |   28 -
 src/rexi/src/rexi.erl                              |  320 ---
 src/rexi/src/rexi_app.erl                          |   22 -
 src/rexi/src/rexi_buffer.erl                       |  104 -
 src/rexi/src/rexi_monitor.erl                      |   65 -
 src/rexi/src/rexi_server.erl                       |  193 --
 src/rexi/src/rexi_server_mon.erl                   |  176 --
 src/rexi/src/rexi_server_sup.erl                   |   29 -
 src/rexi/src/rexi_sup.erl                          |   64 -
 src/rexi/src/rexi_utils.erl                        |  105 -
 src/setup/.gitignore                               |    4 -
 src/setup/LICENSE                                  |  203 --
 src/setup/README.md                                |  210 --
 src/setup/src/setup.app.src                        |   27 -
 src/setup/src/setup.erl                            |  386 ----
 src/setup/src/setup_app.erl                        |   28 -
 src/setup/src/setup_epi.erl                        |   49 -
 src/setup/src/setup_httpd.erl                      |  180 --
 src/setup/src/setup_httpd_handlers.erl             |   32 -
 src/setup/src/setup_sup.erl                        |   44 -
 src/setup/test/t-frontend-setup.sh                 |   71 -
 src/setup/test/t-single-node-auto-setup.sh         |   24 -
 src/setup/test/t-single-node.sh                    |   46 -
 src/setup/test/t.sh                                |   63 -
 src/smoosh/README.md                               |  140 --
 src/smoosh/operator_guide.md                       |  396 ----
 src/smoosh/src/smoosh.app.src                      |   29 -
 src/smoosh/src/smoosh.erl                          |   69 -
 src/smoosh/src/smoosh_app.erl                      |   28 -
 src/smoosh/src/smoosh_channel.erl                  |  325 ---
 src/smoosh/src/smoosh_priority_queue.erl           |   86 -
 src/smoosh/src/smoosh_server.erl                   |  606 ------
 src/smoosh/src/smoosh_sup.erl                      |   38 -
 src/smoosh/src/smoosh_utils.erl                    |   92 -
 src/smoosh/test/exunit/scheduling_window_test.exs  |   79 -
 src/smoosh/test/exunit/test_helper.exs             |    2 -
 test/elixir/lib/step/create_db.ex                  |    2 +-
 440 files changed, 967 insertions(+), 76074 deletions(-)

diff --git a/Makefile b/Makefile
index a748b2c..fc4e83c 100644
--- a/Makefile
+++ b/Makefile
@@ -72,7 +72,7 @@ DESTDIR=
 
 # Rebar options
 apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper
+skip_deps=folsom,meck,mochiweb,proper,bcrypt,hyper
 suites=
 tests=
 
@@ -160,9 +160,9 @@ endif
 .PHONY: check
 check:  all
 	@$(MAKE) emilio
-	@$(MAKE) eunit apps=couch_eval,couch_expiring_cache,ctrace,couch_jobs,couch_views,fabric,mango,chttpd,couch_replicator
+	@$(MAKE) eunit
 	@$(MAKE) elixir-suite
-	@$(MAKE) exunit apps=chttpd
+	@$(MAKE) exunit
 	@$(MAKE) mango-test
 
 .PHONY: eunit
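
With the apps= filter dropped from the check target, "make check" now runs the
full eunit and exunit suites rather than a fixed list of applications. A rough
usage sketch, assuming a built tree; the apps= variable shown above can
presumably still be passed by hand to narrow a run:

    make check                     # emilio, full eunit, elixir suite, exunit, mango tests
    make eunit apps=couch,chttpd   # hypothetical narrower run via the existing apps= variable
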
diff --git a/Makefile.win b/Makefile.win
index aeb7fe7..5240da3 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -76,7 +76,7 @@ DESTDIR=
 
 # Rebar options
 apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper
+skip_deps=folsom,meck,mochiweb,proper,bcrypt,hyper
 suites=
 tests=
 
diff --git a/dev/run b/dev/run
index 46759f4..7ad1494 100755
--- a/dev/run
+++ b/dev/run
@@ -791,7 +791,7 @@ def try_request(
 
 
 def create_system_databases(host, port):
-    for dbname in ["_users", "_replicator", "_global_changes"]:
+    for dbname in ["_users", "_replicator"]:
         conn = httpclient.HTTPConnection(host, port)
         conn.request("HEAD", "/" + dbname)
         resp = conn.getresponse()
diff --git a/emilio.config b/emilio.config
index 0dad938..84a6571 100644
--- a/emilio.config
+++ b/emilio.config
@@ -8,13 +8,11 @@
     "src[\/]emilio[\/]*",
     "src[\/]folsom[\/]*",
     "src[\/]mochiweb[\/]*",
-    "src[\/]snappy[\/]*",
     "src[\/]ssl_verify_fun[\/]*",
     "src[\/]ibrowse[\/]*",
     "src[\/]jiffy[\/]*",
     "src[\/]meck[\/]*",
     "src[\/]proper[\/]*",
     "src[\/]recon[\/]*",
-    "src[\/]hyper[\/]*",
-    "src[\/]triq[\/]*"
+    "src[\/]hyper[\/]*"
 ]}.
diff --git a/mix.exs b/mix.exs
index 9cba1a4..12e0221 100644
--- a/mix.exs
+++ b/mix.exs
@@ -133,16 +133,13 @@ defmodule CouchDBTest.Mixfile do
       "b64url",
       "bear",
       "mochiweb",
-      "snappy",
       "rebar",
       "proper",
       "mochiweb",
       "meck",
-      "khash",
       "hyper",
       "fauxton",
-      "folsom",
-      "hqueue"
+      "folsom"
     ]
 
     deps |> Enum.map(fn app -> "src/#{app}" end)
diff --git a/rebar.config.script b/rebar.config.script
index 2bc761a..7f64d22 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -118,33 +118,21 @@ SubDirs = [
     "src/chttpd",
     "src/couch",
     "src/couch_eval",
-    "src/couch_event",
-    "src/mem3",
-    "src/couch_index",
-    "src/couch_mrview",
     "src/couch_js",
     "src/couch_replicator",
     "src/couch_plugins",
     "src/couch_pse_tests",
     "src/couch_stats",
-    "src/couch_peruser",
     "src/couch_tests",
     "src/couch_views",
     "src/ctrace",
-    "src/ddoc_cache",
-    "src/dreyfus",
     "src/fabric",
     "src/aegis",
     "src/couch_jobs",
     "src/couch_expiring_cache",
-    "src/global_changes",
-    "src/ioq",
     "src/jwtf",
-    "src/ken",
     "src/mango",
-    "src/rexi",
     "src/setup",
-    "src/smoosh",
     "src/ebtree",
     "rel"
 ].
@@ -155,8 +143,6 @@ DepDescs = [
 {b64url,           "b64url",           {tag, "1.0.2"}},
 {erlfdb,           "erlfdb",           {tag, "v1.3.3"}},
 {ets_lru,          "ets-lru",          {tag, "1.1.0"}},
-{khash,            "khash",            {tag, "1.1.0"}},
-{snappy,           "snappy",           {tag, "CouchDB-1.0.4"}},
 
 %% Non-Erlang deps
 {docs,             {url, "https://github.com/apache/couchdb-documentation"},
diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config
index f9f49e1..db85ef1 100644
--- a/rel/apps/couch_epi.config
+++ b/rel/apps/couch_epi.config
@@ -14,12 +14,7 @@
     couch_db_epi,
     fabric2_epi,
     chttpd_epi,
-    couch_index_epi,
     couch_views_epi,
     couch_replicator_epi,
-    dreyfus_epi,
-    global_changes_epi,
-    mango_epi,
-    mem3_epi,
-    setup_epi
+    mango_epi
 ]}.
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 25f3cca..8791239 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -4,33 +4,18 @@ name = {{package_author_name}}
 
 [couchdb]
 uuid = {{uuid}}
-database_dir = {{data_dir}}
-view_index_dir = {{view_index_dir}}
 ; util_driver_dir =
 ; plugin_dir =
 os_process_timeout = 5000 ; 5 seconds. for view servers.
 max_dbs_open = 500
-; Method used to compress everything that is appended to database and view index files, except
-; for attachments (see the attachments section). Available methods are:
-;
-; none         - no compression
-; snappy       - use google snappy, a very fast compressor/decompressor
-; deflate_N    - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
-;                lowest compression ratio) to 9 (slowest, highest compression ratio)
-file_compression = snappy
-; Higher values may give better read performance due to less read operations
-; and/or more OS page cache hits, but they can also increase overall response
-; time for writes when there are many attachment write requests in parallel.
-attachment_stream_buffer_size = 4096
+;
 ; Default security object for databases if not explicitly set
 ; everyone - same as couchdb 1.0, everyone can read/write
 ; admin_only - only admins can read/write
 ; admin_local - sharded dbs on :5984 are read/write for everyone,
 ;               local dbs on :5986 are read/write for admins only
 default_security = admin_only
-; btree_chunk_size = 1279
 ; maintenance_mode = false
-; stem_interactive_updates = true
 ; uri_file =
 ; The speed of processing the _changes feed with doc_ids filter can be
 ; influenced directly with this setting - increase for faster processing at the
@@ -64,68 +49,15 @@ max_document_size = 8000000 ; bytes
 ; Maximum attachment size.
 ; max_attachment_size = infinity
 ;
-; Do not update the least recently used DB cache on reads, only writes
-;update_lru_on_read = false
-;
-; The default storage engine to use when creating databases
-; is set as a key into the [couchdb_engines] section.
-default_engine = couch
-;
 ; Enable this to only "soft-delete" databases when DELETE /{db} requests are
 ; made. This will place a .recovery directory in your data directory and
 ; move deleted databases/shards there instead. You can then manually delete
 ; these files later, as desired.
 ;enable_database_recovery = false
 ;
-; Set the maximum size allowed for a partition. This helps users avoid
-; inadvertently abusing partitions resulting in hot shards. The default
-; is 10GiB. A value of 0 or less will disable partition size checks.
-;max_partition_size = 10737418240
-;
-; When true, system databases _users and _replicator are created immediately
-; on startup if not present.
-;single_node = false
-
 ; Allow edits on the _security object in the user db. By default, it's disabled.
 users_db_security_editable = false
 
-[purge]
-; Allowed maximum number of documents in one purge request
-;max_document_id_number = 100
-;
-; Allowed maximum number of accumulated revisions in one purge request
-;max_revisions_number = 1000
-;
-; Allowed durations when index is not updated for local purge checkpoint
-; document. Default is 24 hours.
-;index_lag_warn_seconds = 86400
-
-[couchdb_engines]
-; The keys in this section are the filename extension that
-; the specified engine module will use. This is important so
-; that couch_server is able to find an existing database without
-; having to ask every configured engine.
-couch = couch_bt_engine
-
-[process_priority]
-; Selectively disable altering process priorities for modules that request it.
-; * NOTE: couch_server priority has been shown to lead to CouchDB hangs and
-;     failures on Erlang releases 21.0 - 21.3.8.12 and 22.0 -> 22.2.4. Do not
-;     enable when running with those versions.
-;couch_server = false
-
-[cluster]
-q=2
-n=3
-; placement = metro-dc-a:2,metro-dc-b:1
-
-; Supply a comma-delimited list of node names that this node should
-; contact in order to join a cluster. If a seedlist is configured the ``_up``
-; endpoint will return a 404 until the node has successfully contacted at
-; least one of the members of the seedlist and replicated an up-to-date copy
-; of the ``_nodes``, ``_dbs``, and ``_users`` system databases.
-; seedlist = couchdb@node1.example.com,couchdb@node2.example.com
-
 [chttpd]
 ; These settings affect the main, clustered port (5984 by default).
 port = {{cluster_port}}
@@ -178,23 +110,6 @@ max_db_number_for_dbs_info_req = 100
 ; rsa:foo = -----BEGIN PUBLIC KEY-----\nMIIBIjAN...IDAQAB\n-----END PUBLIC KEY-----\n
 ; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
 
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-enable = false
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-delete_dbs = false
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-; prefix for user databases. If you change this after user dbs have been
-; created, the existing databases won't get deleted if the associated user
-; gets deleted because of the then prefix mismatch.
-database_prefix = userdb-
-
 [httpd]
 port = {{backend_port}}
 bind_address = 127.0.0.1
@@ -220,13 +135,6 @@ enable_xframe_options = false
 ; Maximum allowed http request size. Applies to both clustered and local port.
 max_http_request_size = 4294967296 ; 4GB
 
-; [httpd_design_handlers]
-; _view =
-
-; [ioq]
-; concurrency = 10
-; ratio = 0.01
-
 [ssl]
 port = 6984
 
@@ -238,23 +146,7 @@ port = 6984
 ; max_objects =
 ; max_size = 104857600
 
-; [mem3]
-; nodes_db = _nodes
-; shard_cache_size = 25000
-; shards_db = _dbs
-; sync_concurrency = 10
-
 ; [fabric]
-; all_docs_concurrency = 10
-; changes_duration =
-; shard_timeout_factor = 2
-; uuid_prefix_len = 7
-; request_timeout = 60000
-; all_docs_timeout = 10000
-; attachments_timeout = 60000
-; view_timeout = 3600000
-; partition_view_timeout = 3600000
-;
 ; Custom FDB directory prefix. All the nodes of the same CouchDB instance
 ; should have a matching directory prefix in order to read and write the same
 ; data. Changes to this value take effect only on node start-up.
@@ -279,26 +171,6 @@ port = 6984
 ; Bulk docs transaction batch size in bytes
 ;update_docs_batch_size = 2500000
 
-; [rexi]
-; buffer_count = 2000
-; server_per_node = true
-; stream_limit = 5
-;
-; Use a single message to kill a group of remote workers This is
-; mostly is an upgrade clause to allow operating in a mixed cluster of
-; 2.x and 3.x nodes. After upgrading switch to true to save some
-; network bandwidth
-;use_kill_all = false
-
-; [global_changes]
-; max_event_delay = 25
-; max_write_delay = 500
-; update_db = true
-
-; [view_updater]
-; min_writer_items = 100
-; min_writer_size = 16777216
-
 [couch_httpd_auth]
 ; WARNING! This only affects the node-local port (5986 by default).
 ; You probably want the settings under [chttpd].
@@ -440,7 +312,6 @@ os_process_limit = 100
 ; os_process_soft_limit = 100
 ; Timeout for how long a response from a busy view group server can take.
 ; "infinity" is also a valid configuration value.
-;group_info_timeout = 5000
 ;query_limit = 268435456
 ;partition_query_limit = 268435456
 
@@ -462,15 +333,12 @@ query = mango_eval
 ; the warning.
 ;index_scan_warning_threshold = 10
 
-[indexers]
-couch_mrview = true
-
 [feature_flags]
 ; This enables any database to be created as a partitioned database (except system dbs).
 ; Setting this to false will stop the creation of partitioned databases.
 ; partitioned||allowed* = true will scope the creation of partitioned databases
 ; to databases with 'allowed' prefix.
-partitioned||* = true
+; partitioned||* = true
 
 [uuids]
 ; Known algorithms:
@@ -699,86 +567,6 @@ writer = stderr
 ; Stats collection interval in seconds. Default 10 seconds.
 ;interval = 10
 
-[smoosh.ratio_dbs]
-min_priority = 2.0
-
-[smoosh.ratio_views]
-min_priority = 2.0
-
-[ioq]
-; The maximum number of concurrent in-flight IO requests that
-concurrency = 10
-
-; The fraction of the time that a background IO request will be selected
-; over an interactive IO request when both queues are non-empty
-ratio = 0.01
-
-[ioq.bypass]
-; System administrators can choose to submit specific classes of IO directly
-; to the underlying file descriptor or OS process, bypassing the queues
-; altogether. Installing a bypass can yield higher throughput and lower
-; latency, but relinquishes some control over prioritization. The following
-; classes are recognized with the following defaults:
-
-; Messages on their way to an external process (e.g., couchjs) are bypassed
-os_process = true
-
-; Disk IO fulfilling interactive read requests is bypassed
-read = true
-
-; Disk IO required to update a database is bypassed
-write = true
-
-; Disk IO required to update views and other secondary indexes is bypassed
-view_update = true
-
-; Disk IO issued by the background replication processes that fix any
-; inconsistencies between shard copies is queued
-shard_sync = false
-
-; Disk IO issued by compaction jobs is queued
-compaction = false
-
-[dreyfus]
-; The name and location of the Clouseau Java service required to
-; enable Search functionality.
-; name = clouseau@127.0.0.1
-
-; CouchDB will try to re-connect to Clouseau using a bounded
-; exponential backoff with the following number of iterations.
-; retry_limit = 5
-
-; The default number of results returned from a global search query.
-; limit = 25
-
-; The default number of results returned from a search on a partition
-; of a database.
-; limit_partitions = 2000
-
-; The maximum number of results that can be returned from a global
-; search query (or any search query on a database without user-defined
-; partitions). Attempts to set ?limit=N higher than this value will
-; be rejected.
-; max_limit = 200
-
-; The maximum number of results that can be returned when searching
-; a partition of a database. Attempts to set ?limit=N higher than this
-; value will be rejected. If this config setting is not defined,
-; CouchDB will use the value of `max_limit` instead. If neither is
-; defined, the default is 2000 as stated here.
-; max_limit_partitions = 2000
-
-[reshard]
-;max_jobs = 48
-;max_history = 20
-;max_retries = 1
-;retry_interval_sec = 10
-;delete_source = true
-;update_shard_map_timeout_sec = 60
-;source_close_timeout_sec = 600
-;require_node_param = false
-;require_range_param = false
-
 [couch_jobs]
 ;
 ; Maximum jitter used when checking for active job timeouts
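
Note that the default.ini cleanup above also comments out partitioned||* = true
under [feature_flags], so partitioned database creation is no longer enabled out
of the box. A minimal sketch of how an operator could presumably turn it back
on, assuming local.ini still overrides default.ini as before:

    [feature_flags]
    ; re-enable creation of partitioned databases for all database names
    partitioned||* = true
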
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index 2c9e899..b788e82 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -8,19 +8,6 @@
 ;max_document_size = 4294967296 ; bytes
 ;os_process_timeout = 5000
 
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-;enable = true
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-;delete_dbs = true
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-
 [chttpd]
 ;port = 5984
 ;bind_address = 127.0.0.1
diff --git a/rel/reltool.config b/rel/reltool.config
index 16096ce..002be40 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -35,42 +35,27 @@
         couch,
         couch_epi,
         couch_jobs,
-        couch_index,
         couch_log,
-        couch_mrview,
         couch_plugins,
         couch_replicator,
         couch_stats,
         couch_eval,
         couch_js,
-        couch_event,
-        couch_peruser,
         couch_views,
-        ddoc_cache,
-        dreyfus,
         ebtree,
         erlfdb,
         ets_lru,
         fabric,
         folsom,
-        global_changes,
         hyper,
         ibrowse,
-        ioq,
         jaeger_passage,
         jiffy,
         jwtf,
-        ken,
-        khash,
         local,
         mango,
-        mem3,
         mochiweb,
         passage,
-        rexi,
-        setup,
-        smoosh,
-        snappy,
         thrift_protocol,
         %% extra
         recon
@@ -108,40 +93,25 @@
     {app, couch_eval, [{incl_cond, include}]},
     {app, couch_js, [{incl_cond, include}]},
     {app, couch_jobs, [{incl_cond, include}]},
-    {app, couch_index, [{incl_cond, include}]},
     {app, couch_log, [{incl_cond, include}]},
-    {app, couch_mrview, [{incl_cond, include}]},
     {app, couch_plugins, [{incl_cond, include}]},
     {app, couch_replicator, [{incl_cond, include}]},
     {app, couch_stats, [{incl_cond, include}]},
-    {app, couch_event, [{incl_cond, include}]},
-    {app, couch_peruser, [{incl_cond, include}]},
     {app, couch_views, [{incl_cond, include}]},
-    {app, ddoc_cache, [{incl_cond, include}]},
-    {app, dreyfus, [{incl_cond, include}]},
     {app, erlfdb, [{incl_cond, include}]},
     {app, ebtree, [{incl_cond, include}]},
     {app, ets_lru, [{incl_cond, include}]},
     {app, fabric, [{incl_cond, include}]},
     {app, folsom, [{incl_cond, include}]},
-    {app, global_changes, [{incl_cond, include}]},
     {app, hyper, [{incl_cond, include}]},
     {app, ibrowse, [{incl_cond, include}]},
-    {app, ioq, [{incl_cond, include}]},
     {app, jaeger_passage, [{incl_cond, include}]},
     {app, jiffy, [{incl_cond, include}]},
     {app, jwtf, [{incl_cond, include}]},
-    {app, ken, [{incl_cond, include}]},
     {app, local, [{incl_cond, include}]},
-    {app, khash, [{incl_cond, include}]},
     {app, mango, [{incl_cond, include}]},
-    {app, mem3, [{incl_cond, include}]},
     {app, mochiweb, [{incl_cond, include}]},
     {app, passage, [{incl_cond, include}]},
-    {app, rexi, [{incl_cond, include}]},
-    {app, setup, [{incl_cond, include}]},
-    {app, smoosh, [{incl_cond, include}]},
-    {app, snappy, [{incl_cond, include}]},
     {app, thrift_protocol, [{incl_cond, include}]},
 
     %% extra
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index b124375..8567ada 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -123,6 +123,12 @@ start_link(Name, Options) ->
          end,
     ok = couch_httpd:validate_bind_address(IP),
 
+    % Ensure the uuid is set so that concurrent replications
+    % get the same value. This used to be done in the backend (:5986)
+    % httpd start_link and was moved here for now. Ideally this should
+    % be set in FDB or coordinated across all the nodes.
+    couch_server:get_uuid(),
+
     set_auth_handlers(),
 
     Options1 = Options ++ [
@@ -153,7 +159,6 @@ stop() ->
     mochiweb_http:stop(?MODULE).
 
 handle_request(MochiReq0) ->
-    erlang:put(?REWRITE_COUNT, 0),
     MochiReq = couch_httpd_vhost:dispatch_host(MochiReq0),
     handle_request_int(MochiReq).
 
diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index 8bf33ec..79ca4d1 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -12,7 +12,7 @@
 
 -module(chttpd_changes).
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -export([
     handle_db_changes/3,
@@ -530,9 +530,7 @@ send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
         fwd ->
             FinalAcc0 = case element(1, FinalAcc) of
                 changes_acc -> % we came here via couch_http or internal call
-                    FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)};
-                fabric_changes_acc -> % we came here via chttpd / fabric / rexi
-                    FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
+                    FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)}
             end,
             {ok, FinalAcc0};
         rev -> {ok, FinalAcc}
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index ac3d3b1..5045e21 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -15,9 +15,7 @@
 -compile(tuple_calls).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -export([handle_request/1, handle_compact_req/2, handle_design_req/2,
     db_req/2, couch_doc_open/4,handle_changes_req/2,
@@ -683,7 +681,7 @@ db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
 db_req(#httpd{method='POST',
     path_parts=[_, OP, <<"queries">>]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
     Props = chttpd:json_body_obj(Req),
-    case couch_mrview_util:get_view_queries(Props) of
+    case couch_views_util:get_view_queries(Props) of
         undefined ->
             throw({bad_request,
                 <<"POST body must include `queries` parameter.">>});
@@ -1016,7 +1014,7 @@ send_all_docs_keys(Db, #mrargs{} = Args, VAcc0) ->
                     doc = DocValue
                 }
         end,
-        Row1 = fabric_view:transform_row(Row0),
+        Row1 = couch_views_http:transform_row(Row0),
         view_cb(Row1, Acc)
     end,
     {ok, VAcc2} = fabric2_db:fold_docs(Db, Keys, CB, VAcc1, OpenOpts),
@@ -1257,7 +1255,7 @@ db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
         missing_rev -> nil;
         Rev -> Rev
     end,
-    {TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
+    {TargetDocId0, TargetRevs} = chttpd_util:parse_copy_destination_header(Req),
     TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
     % open old doc
     Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
@@ -2106,7 +2104,7 @@ set_namespace(<<"_local_docs">>, Args) ->
 set_namespace(<<"_design_docs">>, Args) ->
     set_namespace(<<"_design">>, Args);
 set_namespace(NS, #mrargs{} = Args) ->
-    couch_mrview_util:set_extra(Args, namespace, NS).
+    couch_views_util:set_extra(Args, namespace, NS).
 
 
 %% /db/_bulk_get stuff
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index d501159..2acd6de 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -15,9 +15,11 @@
 -export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
 
 -export([
-    not_supported/2,
     not_supported/3,
-    not_implemented/2
+    not_supported/2,
+    not_supported/1,
+    not_implemented/2,
+    not_implemented/1
 ]).
 
 
@@ -38,6 +40,10 @@ url_handler(<<"_replicate">>)      -> fun chttpd_misc:handle_replicate_req/1;
 url_handler(<<"_uuids">>)          -> fun chttpd_misc:handle_uuids_req/1;
 url_handler(<<"_session">>)        -> fun chttpd_auth:handle_session_req/1;
 url_handler(<<"_up">>)             -> fun chttpd_misc:handle_up_req/1;
+url_handler(<<"_membership">>)     -> fun ?MODULE:not_supported/1;
+url_handler(<<"_reshard">>)        -> fun ?MODULE:not_supported/1;
+url_handler(<<"_db_updates">>)     -> fun ?MODULE:not_implemented/1;
+url_handler(<<"_cluster_setup">>)  -> fun ?MODULE:not_implemented/1;
 url_handler(_) -> no_match.
 
 db_handler(<<"_view_cleanup">>) -> fun chttpd_db:handle_view_cleanup_req/2;
@@ -48,6 +54,8 @@ db_handler(<<"_temp_view">>)    -> fun ?MODULE:not_supported/2;
 db_handler(<<"_changes">>)      -> fun chttpd_db:handle_changes_req/2;
 db_handler(<<"_purge">>)        -> fun ?MODULE:not_implemented/2;
 db_handler(<<"_purged_infos_limit">>) -> fun ?MODULE:not_implemented/2;
+db_handler(<<"_shards">>)       -> fun ?MODULE:not_supported/2;
+db_handler(<<"_sync_shards">>)  -> fun ?MODULE:not_supported/2;
 db_handler(_) -> no_match.
 
 design_handler(<<"_view">>)    -> fun chttpd_view:handle_view_req/3;
@@ -186,7 +194,6 @@ handler_info(Method, [<<"_", _/binary>> = Part| Rest], Req) ->
     % on for known system databases.
     DbName = case Part of
         <<"_dbs">> -> '_dbs';
-        <<"_global_changes">> -> '_global_changes';
         <<"_metadata">> -> '_metadata';
         <<"_nodes">> -> '_nodes';
         <<"_replicator">> -> '_replicator';
@@ -497,7 +504,7 @@ handler_info(_, _, _) ->
 
 get_copy_destination(Req) ->
     try
-        {DocIdStr, _} = couch_httpd_db:parse_copy_destination_header(Req),
+        {DocIdStr, _} = chttpd_util:parse_copy_destination_header(Req),
         list_to_binary(mochiweb_util:unquote(DocIdStr))
     catch _:_ ->
         unknown
@@ -509,10 +516,18 @@ not_supported(#httpd{} = Req, Db, _DDoc) ->
 
 
 not_supported(#httpd{} = Req, _Db) ->
+    not_supported(Req).
+
+
+not_supported(#httpd{} = Req) ->
     Msg = <<"resource is not supported in CouchDB >= 4.x">>,
     chttpd:send_error(Req, 410, gone, Msg).
 
 
 not_implemented(#httpd{} = Req, _Db) ->
+    not_implemented(Req).
+
+
+not_implemented(#httpd{} = Req) ->
     Msg = <<"resource is not implemented">>,
     chttpd:send_error(Req, 501, not_implemented, Msg).
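
With the handler clauses above, removed 3.x endpoints answer with a fixed error
instead of a 404: _membership, _reshard, _shards and _sync_shards are routed to
not_supported (410), while _db_updates, _cluster_setup, _purge and
_purged_infos_limit go to not_implemented (501). Illustrative responses on the
clustered port (5984 by default), assuming chttpd:send_error/4 keeps its usual
{"error", "reason"} JSON body:

    GET /_reshard          ->  410 Gone
    {"error":"gone","reason":"resource is not supported in CouchDB >= 4.x"}

    GET /_db_updates       ->  501 Not Implemented
    {"error":"not_implemented","reason":"resource is not implemented"}
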
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 5cfd0f7..3f81c1b 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -30,10 +30,10 @@
 ]).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -import(chttpd,
-    [send_json/2,send_json/3,send_method_not_allowed/2,
+    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
     send_chunk/2,start_chunked_response/3]).
 
 -define(MAX_DB_NUM_FOR_DBS_INFO, 100).
@@ -61,12 +61,7 @@ handle_welcome_req(Req, _) ->
     send_method_not_allowed(Req, "GET,HEAD").
 
 get_features() ->
-    case clouseau_rpc:connected() of
-        true ->
-            [search | config:features()];
-        false ->
-            config:features()
-    end.
+    config:features().
 
 handle_favicon_req(Req) ->
     handle_favicon_req(Req, get_docroot()).
@@ -120,7 +115,7 @@ handle_all_dbs_req(#httpd{method='GET'}=Req) ->
         direction = Dir,
         limit = Limit,
         skip = Skip
-    } = couch_mrview_http:parse_params(Req, undefined),
+    } = couch_views_http_util:parse_params(Req, undefined),
 
     Options = [
         {start_key, StartKey},
@@ -142,7 +137,7 @@ all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
     {ok, Acc#vacc{resp=Resp1}};
 all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
-    Prepend = couch_mrview_http:prepend_val(Acc),
+    Prepend = couch_views_http_util:prepend_val(Acc),
     DbName = couch_util:get_value(id, Row),
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
     {ok, Acc#vacc{prepend=",", resp=Resp1}};
@@ -160,7 +155,7 @@ handle_dbs_info_req(#httpd{method = 'GET'} = Req) ->
 handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
     chttpd:validate_ctype(Req, "application/json"),
     Props = chttpd:json_body_obj(Req),
-    Keys = couch_mrview_util:get_view_keys(Props),
+    Keys = couch_views_util:get_view_keys(Props),
     case Keys of
         undefined -> throw({bad_request, "`keys` member must exist."});
         _ -> ok
@@ -253,7 +248,7 @@ send_db_infos(Req, ListFunctionName) ->
         direction = Dir,
         limit = Limit,
         skip = Skip
-    } = couch_mrview_http:parse_params(Req, undefined),
+    } = couch_views_http_util:parse_params(Req, undefined),
 
     Options = [
         {start_key, StartKey},
@@ -280,7 +275,7 @@ dbs_info_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
     {ok, Acc#vacc{resp = Resp1}};
 dbs_info_callback({row, Props}, #vacc{resp = Resp0} = Acc) ->
-    Prepend = couch_mrview_http:prepend_val(Acc),
+    Prepend = couch_views_http_util:prepend_val(Acc),
     Chunk = [Prepend, ?JSON_ENCODE({Props})],
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
     {ok, Acc#vacc{prepend = ",", resp = Resp1}};
@@ -334,9 +329,33 @@ handle_reload_query_servers_req(#httpd{method='POST'}=Req) ->
 handle_reload_query_servers_req(Req) ->
     send_method_not_allowed(Req, "POST").
 
+handle_uuids_req(#httpd{method='GET'}=Req) ->
+    Max = list_to_integer(config:get("uuids","max_count","1000")),
+    Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
+        N when N > Max ->
+            throw({bad_request, <<"count parameter too large">>});
+        N when N < 0 ->
+            throw({bad_request, <<"count must be a positive integer">>});
+        N -> N
+    catch
+        error:badarg ->
+            throw({bad_request, <<"count must be a positive integer">>})
+    end,
+    UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
+    Etag = couch_httpd:make_etag(UUIDs),
+    couch_httpd:etag_respond(Req, Etag, fun() ->
+        CacheBustingHeaders = [
+            {"Date", couch_util:rfc1123_date()},
+            {"Cache-Control", "no-cache"},
+            % Past date, ON PURPOSE!
+            {"Expires", "Mon, 01 Jan 1990 00:00:00 GMT"},
+            {"Pragma", "no-cache"},
+            {"ETag", Etag}
+        ],
+        send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
+    end);
 handle_uuids_req(Req) ->
-    couch_httpd_misc_handlers:handle_uuids_req(Req).
-
+    send_method_not_allowed(Req, "GET").
 
 handle_up_req(#httpd{method='GET'} = Req) ->
     case config:get("couchdb", "maintenance_mode") of
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index f657384..0a4ccfa 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -231,7 +231,6 @@ get_stats() ->
         {process_count, erlang:system_info(process_count)},
         {process_limit, erlang:system_info(process_limit)},
         {message_queues, {MessageQueues}},
-        {internal_replication_jobs, mem3_sync:get_backlog()},
         {distribution, {get_distribution_stats()}}
     ].
 
diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl
deleted file mode 100644
index 1c2c1f3..0000000
--- a/src/chttpd/src/chttpd_rewrite.erl
+++ /dev/null
@@ -1,487 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(chttpd_rewrite).
-
--compile(tuple_calls).
-
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-handle_rewrite_req(#httpd{}=Req, Db, DDoc) ->
-    RewritesSoFar = erlang:get(?REWRITE_COUNT),
-    MaxRewrites = config:get_integer("httpd", "rewrite_limit", 100),
-    case RewritesSoFar >= MaxRewrites of
-        true ->
-            throw({bad_request, <<"Exceeded rewrite recursion limit">>});
-        false ->
-            erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
-    end,
-    case get_rules(DDoc) of
-        Rules when is_list(Rules) ->
-            do_rewrite(Req, Rules);
-        Rules when is_binary(Rules) ->
-            case couch_query_servers:rewrite(Req, Db, DDoc) of
-                undefined ->
-                    chttpd:send_error(Req, 404, <<"rewrite_error">>,
-                        <<"Invalid path.">>);
-                Rewrite ->
-                    do_rewrite(Req, Rewrite)
-            end;
-        undefined ->
-            chttpd:send_error(Req, 404, <<"rewrite_error">>,
-                <<"Invalid path.">>)
-    end.
-
-
-get_rules(#doc{body={Props}}) ->
-    couch_util:get_value(<<"rewrites">>, Props).
-
-
-do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props) ->
-    case couch_util:get_value(<<"code">>, Props) of
-        undefined ->
-            Method = rewrite_method(Req, Rewrite),
-            Headers = rewrite_headers(Req, Rewrite),
-            Path = ?b2l(rewrite_path(Req, Rewrite)),
-            NewMochiReq = mochiweb_request:new(MochiReq:get(socket),
-                                               Method,
-                                               Path,
-                                               MochiReq:get(version),
-                                               Headers),
-            Body = case couch_util:get_value(<<"body">>, Props) of
-                undefined -> erlang:get(mochiweb_request_body);
-                B -> B
-            end,
-            NewMochiReq:cleanup(),
-            case Body of
-                undefined -> [];
-                _ -> erlang:put(mochiweb_request_body, Body)
-            end,
-            couch_log:debug("rewrite to ~p", [Path]),
-            chttpd:handle_request_int(NewMochiReq);
-        Code ->
-            chttpd:send_response(
-                Req,
-                Code,
-                case couch_util:get_value(<<"headers">>, Props) of
-                    undefined -> [];
-                    {H1} -> H1
-                end,
-                rewrite_body(Rewrite))
-    end;
-do_rewrite(#httpd{method=Method,
-                  path_parts=[_DbName, <<"_design">>, _DesignName, _Rewrite|PathParts],
-                  mochi_req=MochiReq}=Req,
-           Rules) when is_list(Rules) ->
-    % create dispatch list from rules
-    Prefix = path_prefix(Req),
-    QueryList = lists:map(fun decode_query_value/1, chttpd:qs(Req)),
-
-    DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
-    Method1 = couch_util:to_binary(Method),
-
-    %% get raw path by matching url to a rule.
-    RawPath = case try_bind_path(DispatchList, Method1,
-            PathParts, QueryList) of
-        no_dispatch_path ->
-            throw(not_found);
-        {NewPathParts, Bindings} ->
-            Parts = [quote_plus(X) || X <- NewPathParts],
-
-            % build new path, reencode query args, eventually convert
-            % them to json
-            Bindings1 = maybe_encode_bindings(Bindings),
-            Path = iolist_to_binary([
-                string:join(Parts, [?SEPARATOR]),
-                [["?", mochiweb_util:urlencode(Bindings1)] || Bindings1 =/= []]
-            ]),
-
-            % if path is relative detect it and rewrite path
-            safe_relative_path(Prefix, Path)
-        end,
-
-    % normalize final path (fix levels "." and "..")
-    RawPath1 = ?b2l(normalize_path(RawPath)),
-
-    couch_log:debug("rewrite to ~p ~n", [RawPath1]),
-
-    % build a new mochiweb request
-    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                     MochiReq:get(method),
-                                     RawPath1,
-                                     MochiReq:get(version),
-                                     MochiReq:get(headers)),
-
-    % cleanup, It force mochiweb to reparse raw uri.
-    MochiReq1:cleanup(),
-
-    chttpd:handle_request_int(MochiReq1).
-
-
-rewrite_method(#httpd{method=Method}, {Props}) ->
-    DefaultMethod = couch_util:to_binary(Method),
-    couch_util:get_value(<<"method">>, Props, DefaultMethod).
-
-rewrite_path(#httpd{}=Req, {Props}=Rewrite) ->
-    Prefix = path_prefix(Req),
-    RewritePath = case couch_util:get_value(<<"path">>, Props) of
-        undefined ->
-            throw({<<"rewrite_error">>,
-                   <<"Rewrite result must produce a new path.">>});
-        P -> P
-    end,
-    SafeRelativePath = safe_relative_path(Prefix, RewritePath),
-    NormalizedPath = normalize_path(SafeRelativePath),
-    QueryParams = rewrite_query_params(Req, Rewrite),
-    case QueryParams of
-        <<"">> ->
-            NormalizedPath;
-        QueryParams ->
-            <<NormalizedPath/binary, "?", QueryParams/binary>>
-    end.
-
-rewrite_query_params(#httpd{}=Req, {Props}) ->
-    RequestQS = chttpd:qs(Req),
-    RewriteQS = case couch_util:get_value(<<"query">>, Props) of
-        undefined -> RequestQS;
-        {V} -> V
-    end,
-    RewriteQSEsc = [{chttpd:quote(K), chttpd:quote(V)} || {K, V} <- RewriteQS],
-    iolist_to_binary(string:join([[K, "=", V] || {K, V} <- RewriteQSEsc], "&")).
-
-rewrite_headers(#httpd{mochi_req=MochiReq}, {Props}) ->
-    case couch_util:get_value(<<"headers">>, Props) of
-        undefined ->
-            MochiReq:get(headers);
-        {H} ->
-            mochiweb_headers:enter_from_list(
-                lists:map(fun({Key, Val}) -> {?b2l(Key), ?b2l(Val)} end, H),
-                MochiReq:get(headers))
-    end.
-
-rewrite_body({Props}) ->
-    Body = case couch_util:get_value(<<"body">>, Props) of
-        undefined -> erlang:get(mochiweb_request_body);
-        B -> B
-    end,
-    case Body of
-        undefined ->
-            [];
-        _ ->
-            erlang:put(mochiweb_request_body, Body),
-            Body
-    end.
-
-
-path_prefix(#httpd{path_parts=[DbName, <<"_design">>, DesignName | _]}) ->
-    EscapedDesignName = ?l2b(couch_util:url_encode(DesignName)),
-    EscapedDbName = ?l2b(couch_util:url_encode(DbName)),
-    DesignId = <<"_design/", EscapedDesignName/binary>>,
-    <<"/", EscapedDbName/binary, "/", DesignId/binary>>.
-
-safe_relative_path(Prefix, Path) ->
-    case mochiweb_util:safe_relative_path(?b2l(Path)) of
-        undefined ->
-            <<Prefix/binary, "/", Path/binary>>;
-        V0 ->
-            V1 = ?l2b(V0),
-            <<Prefix/binary, "/", V1/binary>>
-    end.
-
-
-quote_plus({bind, X}) ->
-    mochiweb_util:quote_plus(X);
-quote_plus(X) ->
-    mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching current url. If none is found
-%% 404 error not_found is raised
-try_bind_path([], _Method, _PathParts, _QueryList) ->
-    no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
-    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
-    case bind_method(Method1, Method) of
-        true ->
-            case bind_path(PathParts1, PathParts, []) of
-                {ok, Remaining, Bindings} ->
-                    Bindings1 = Bindings ++ QueryList,
-                    % we parse query args from the rule and fill
-                    % it eventually with bindings vars
-                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
-                        Formats, []),
-                    % remove params in QueryLists1 that are already in
-                    % QueryArgs1
-                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
-                        K1 = to_binding(K),
-                        KV = case couch_util:get_value(K1, QueryArgs1) of
-                            undefined -> [{K1, V}];
-                            _V1 -> []
-                        end,
-                        Acc ++ KV
-                    end, [], Bindings1),
-
-                    FinalBindings = Bindings2 ++ QueryArgs1,
-                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
-                                    Remaining, []),
-                    {NewPathParts, FinalBindings};
-                fail ->
-                    try_bind_path(Rest, Method, PathParts, QueryList)
-            end;
-        false ->
-            try_bind_path(Rest, Method, PathParts, QueryList)
-    end.
-
-%% rewriting dynamically the query list given as query member in
-%% rewrites. Each value is replaced by one binding or an argument
-%% passed in url.
-make_query_list([], _Bindings, _Formats, Acc) ->
-    Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
-    Value1 = {Value},
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
-    get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
-    get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
-    Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
-    lists:reverse(lists:foldl(fun
-                (<<":", Var/binary>>=Value1, Acc) ->
-                    [get_var(Var, Bindings, Value1, Formats)|Acc];
-                (Value1, Acc) ->
-                    [Value1|Acc]
-            end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
-    Value.
-
-maybe_json(Key, Value) ->
-    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
-                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
-        true ->
-            ?JSON_ENCODE(Value);
-        false ->
-            Value
-    end.
-
-get_var(VarName, Props, Default, Formats) ->
-    VarName1 = to_binding(VarName),
-    Val = couch_util:get_value(VarName1, Props, Default),
-    maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
-    case couch_util:get_value(VarName, Formats) of
-        undefined ->
-             Value;
-        Format ->
-            format(Format, Value)
-    end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
-    Value;
-format(<<"int">>, Value) when is_binary(Value) ->
-    format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
-    case (catch list_to_integer(Value)) of
-        IntVal when is_integer(IntVal) ->
-            IntVal;
-        _ ->
-            Value
-    end;
-format(<<"bool">>, Value) when is_binary(Value) ->
-    format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
-    case string:to_lower(Value) of
-        "true" -> true;
-        "false" -> false;
-        _ -> Value
-    end;
-format(_Format, Value) ->
-   Value.
-
-%% doc: build new path from bindings. bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined -> << "undefined">>;
-        P1 ->
-            iolist_to_binary(P1)
-    end,
-    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
-    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check whether the request method matches the rule method. If the
-%% method rule is '*', which is the default, all
-%% request methods will bind. It allows us to make rules
-%% depending on the HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
-    true;
-bind_method({bind, Method}, Method) ->
-    true;
-bind_method(_, _) ->
-    false.
-
-
-%% @doc bind path. Using the rule from we try to bind variables given
-%% to the current url by pattern matching
-bind_path([], [], Bindings) ->
-    {ok, [], Bindings};
-bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
-    {ok, Rest, Bindings};
-bind_path(_, [], _) ->
-    fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
-    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
-    bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
-    fail.
-
-
-%% normalize path.
-normalize_path(Path) when is_binary(Path)->
-    normalize_path(?b2l(Path));
-normalize_path(Path) when is_list(Path)->
-    Segments = normalize_path1(string:tokens(Path, "/"), []),
-    NormalizedPath = string:join(Segments, [?SEPARATOR]),
-    iolist_to_binary(["/", NormalizedPath]).
-
-
-normalize_path1([], Acc) ->
-    lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
-    Acc1 = case Acc of
-        [] -> [".."|Acc];
-        [T|_] when T =:= ".." -> [".."|Acc];
-        [_|R] -> R
-    end,
-    normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
-    normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
-    normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc transform json rule in erlang for pattern matching
-make_rule(Rule) ->
-    Method = case couch_util:get_value(<<"method">>, Rule) of
-        undefined -> ?MATCH_ALL;
-        M -> to_binding(M)
-    end,
-    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
-        undefined -> [];
-        {Args} -> Args
-        end,
-    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
-        undefined -> [?MATCH_ALL];
-        From ->
-            parse_path(From)
-        end,
-    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
-        undefined ->
-            throw({error, invalid_rewrite_target});
-        To ->
-            parse_path(To)
-        end,
-    Formats = case couch_util:get_value(<<"formats">>, Rule) of
-        undefined -> [];
-        {Fmts} -> Fmts
-    end,
-    [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
-    {ok, SlashRE} = re:compile(<<"\\/">>),
-    path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc convert a path rule (from or to) to an erlang list
-%% * and path variable starting by ":" are converted
-%% in erlang atom.
-path_to_list([], Acc, _DotDotCount) ->
-    lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
-    path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
-    case config:get("httpd", "secure_rewrites", "true") of
-    "false" ->
-        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-    _Else ->
-        couch_log:notice("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
-        throw({insecure_rewrite_rule, "too many ../.. segments"})
-    end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
-    P1 = case P of
-        <<":", Var/binary>> ->
-            to_binding(Var);
-        _ -> P
-    end,
-    path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
-    [];
-maybe_encode_bindings(Props) ->
-    lists:foldl(fun
-            ({{bind, <<"*">>}, _V}, Acc) ->
-                Acc;
-            ({{bind, K}, V}, Acc) ->
-                V1 = iolist_to_binary(maybe_json(K, V)),
-                [{K, V1}|Acc]
-        end, [], Props).
-
-decode_query_value({K,V}) ->
-    case lists:member(K, ["key", "startkey", "start_key",
-                "endkey", "end_key", "keys"]) of
-        true ->
-            {to_binding(K), ?JSON_DECODE(V)};
-        false ->
-            {to_binding(K), ?l2b(V)}
-    end.
-
-to_binding({bind, V}) ->
-    {bind, V};
-to_binding(V) when is_list(V) ->
-    to_binding(?l2b(V));
-to_binding(V) ->
-    {bind, V}.
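
With chttpd_rewrite gone, the "."/".." folding its normalize_path/1 performed
before re-dispatching goes with it. For reference, a minimal standalone sketch
of the same folding over plain strings (no binaries and no secure_rewrites
config check; the module name is illustrative):

    -module(normalize_path_sketch).
    -export([normalize/1]).

    %% Collapse "." and ".." segments the way the removed rewriter did,
    %% e.g. normalize("/a/./b/../c") returns "/a/c".
    normalize(Path) ->
        Segments = fold(string:tokens(Path, "/"), []),
        "/" ++ string:join(Segments, "/").

    fold([], Acc)                       -> lists:reverse(Acc);
    fold([".." | Rest], [])             -> fold(Rest, [".."]);
    fold([".." | Rest], [".." | _] = A) -> fold(Rest, [".." | A]);
    fold([".." | Rest], [_ | Acc])      -> fold(Rest, Acc);
    fold(["." | Rest], Acc)             -> fold(Rest, Acc);
    fold([Seg | Rest], Acc)             -> fold(Rest, [Seg | Acc]).
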
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index 8a15bdc..0390d21 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -12,15 +12,11 @@
 
 -module(chttpd_show).
 
--export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3]).
+-export([handle_doc_update_req/3]).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
-% /db/_design/foo/_show/bar/docid
-% show converts a json doc to a response of any content-type.
-% it looks up the doc an then passes it to the query server.
-% then it sends the response from the query server to the http client.
 
 maybe_open_doc(Db, DocId, Options) ->
     case fabric:open_doc(Db, DocId, Options) of
@@ -31,70 +27,6 @@ maybe_open_doc(Db, DocId, Options) ->
         nil
     end.
 
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId]
-    }=Req, Db, DDoc) ->
-
-    % open the doc
-    Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
-    Doc = maybe_open_doc(Db, DocId, Options),
-
-    % we don't handle revs here b/c they are an internal api
-    % returns 404 if there is no doc with DocId
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId|Rest]
-    }=Req, Db, DDoc) ->
-
-    DocParts = [DocId|Rest],
-    DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
-
-    % open the doc
-    Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
-    Doc = maybe_open_doc(Db, DocId1, Options),
-
-    % we don't handle revs here b/c they are an internal api
-    % pass 404 docs to the show function
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName]
-    }=Req, Db, DDoc) ->
-    % with no docid the doc is nil
-    handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
-handle_doc_show_req(Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
-    %% Will throw an exception if the _show handler is missing
-    couch_util:get_nested_json_value(DDoc#doc.body, [<<"shows">>, ShowName]),
-    % get responder for ddoc/showname
-    CurrentEtag = show_etag(Req, Doc, DDoc, []),
-    chttpd:etag_respond(Req, CurrentEtag, fun() ->
-        JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
-        JsonDoc = couch_query_servers:json_doc(Doc),
-        [<<"resp">>, ExternalResp] =
-            couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
-                [JsonDoc, JsonReq]),
-        JsonResp = apply_etag(ExternalResp, CurrentEtag),
-        chttpd_external:send_external_response(Req, JsonResp)
-    end).
-
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
-    Accept = chttpd:header_value(Req, "Accept"),
-    DocPart = case Doc of
-        nil -> nil;
-        Doc -> chttpd:doc_etag(Doc)
-    end,
-    couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
-        UserCtx#user_ctx.roles, More}).
-
 % /db/_design/foo/update/bar/docid
 % updates a doc based on a request
 % handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
@@ -133,7 +65,7 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
                 Options = [{user_ctx, Req#httpd.user_ctx}]
             end,
             NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
-            couch_doc:validate_docid(NewDoc#doc.id),
+            fabric2_db:validate_docid(NewDoc#doc.id),
             {UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
             chttpd_stats:incr_writes(),
             NewRevStr = couch_doc:rev_to_str(NewRev),
@@ -154,86 +86,6 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
     % todo set location field
     chttpd_external:send_external_response(Req, JsonResp).
 
-
-% view-list request with view and list from same design doc.
-handle_view_list_req(#httpd{method=Method,
-        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc)
-        when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
-    Keys = chttpd:qs_json_value(Req, "keys", undefined),
-    handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-% view-list request with view and list from different design docs.
-handle_view_list_req(#httpd{method=Method,
-        path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc)
-        when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
-    Keys = chttpd:qs_json_value(Req, "keys", undefined),
-    handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method=Method}=Req, _Db, _DDoc)
-        when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
-    chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(#httpd{method='POST',
-        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
-    chttpd:validate_ctype(Req, "application/json"),
-    ReqBody = chttpd:body(Req),
-    {Props2} = ?JSON_DECODE(ReqBody),
-    Keys = proplists:get_value(<<"keys">>, Props2, undefined),
-    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
-        {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST',
-        path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
-    chttpd:validate_ctype(Req, "application/json"),
-    ReqBody = chttpd:body(Req),
-    {Props2} = ?JSON_DECODE(ReqBody),
-    Keys = proplists:get_value(<<"keys">>, Props2, undefined),
-    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
-        {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(Req, _Db, _DDoc) ->
-    chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
-    %% Will throw an exception if the _list handler is missing
-    couch_util:get_nested_json_value(DDoc#doc.body, [<<"lists">>, LName]),
-    DbName = couch_db:name(Db),
-    {ok, VDoc} = ddoc_cache:open(DbName, <<"_design/", ViewDesignName/binary>>),
-    CB = fun list_cb/2,
-    QueryArgs = couch_mrview_http:parse_body_and_query(Req, Keys),
-    Options = [{user_ctx, Req#httpd.user_ctx}],
-    couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
-        Acc = #lacc{
-            lname = LName,
-            req = Req,
-            qserver = QServer,
-            db = Db
-        },
-        case ViewName of
-            <<"_all_docs">> ->
-                fabric:all_docs(Db, Options, CB, Acc, QueryArgs);
-            _ ->
-                fabric:query_view(Db, Options, VDoc, ViewName,
-                    CB, Acc, QueryArgs)
-        end
-    end).
-
-
-list_cb({row, Row} = Msg, Acc) ->
-    case lists:keymember(doc, 1, Row) of
-        true -> chttpd_stats:incr_reads();
-        false -> ok
-    end,
-    chttpd_stats:incr_rows(),
-    couch_mrview_show:list_cb(Msg, Acc);
-
-list_cb(Msg, Acc) ->
-    couch_mrview_show:list_cb(Msg, Acc).
-
-
 % Maybe this is in the proplists API
 % todo move to couch_util
 json_apply_field(H, {L}) ->
@@ -248,17 +100,6 @@ json_apply_field({Key, NewValue}, [], Acc) ->
     % end of list, add ours
     {[{Key, NewValue}|Acc]}.
 
-apply_etag(JsonResp, undefined) ->
-    JsonResp;
-apply_etag({ExternalResponse}, CurrentEtag) ->
-    % Here we embark on the delicate task of replacing or creating the
-    % headers on the JsonResponse object. We need to control the Etag and
-    % Vary headers. If the external function controls the Etag, we'd have to
-    % run it to check for a match, which sort of defeats the purpose.
-    apply_headers(ExternalResponse, [
-        {<<"ETag">>, CurrentEtag},
-        {<<"Vary">>, <<"Accept">>}
-    ]).
 
 apply_headers(JsonResp, []) ->
     JsonResp;
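
json_apply_field/2 stays behind for the _update handler: it sets one field in a
decoded EJSON object, replacing any existing entry for that key. A rough
standalone equivalent over the usual {PropList} encoding (key ordering may
differ from the recursive original; the module name is illustrative):

    -module(json_apply_field_sketch).
    -export([apply_field/2]).

    %% apply_field({<<"b">>, 2}, {[{<<"a">>, 1}, {<<"b">>, 0}]}) returns
    %% {[{<<"a">>, 1}, {<<"b">>, 2}]}.
    apply_field({Key, NewValue}, {Props}) ->
        {lists:keystore(Key, 1, Props, {Key, NewValue})}.
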
diff --git a/src/chttpd/src/chttpd_util.erl b/src/chttpd/src/chttpd_util.erl
new file mode 100644
index 0000000..fcaa09d
--- /dev/null
+++ b/src/chttpd/src/chttpd_util.erl
@@ -0,0 +1,41 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_util).
+
+
+-export([
+    parse_copy_destination_header/1
+]).
+
+
+parse_copy_destination_header(Req) ->
+    case couch_httpd:header_value(Req, "Destination") of
+    undefined ->
+        throw({bad_request, "Destination header is mandatory for COPY."});
+    Destination ->
+        case re:run(Destination, "^https?://", [{capture, none}]) of
+        match ->
+            throw({bad_request, "Destination URL must be relative."});
+        nomatch ->
+            % see if ?rev=revid got appended to the Destination header
+            case re:run(Destination, "\\?", [{capture, none}]) of
+            nomatch ->
+                {list_to_binary(Destination), {0, []}};
+            match ->
+                [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+                [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+                {Pos, RevId} = couch_doc:parse_rev(Rev),
+                {list_to_binary(DocId), {Pos, [RevId]}}
+            end
+        end
+    end.
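
The new chttpd_util:parse_copy_destination_header/1 rejects absolute URLs and
then splits an optional "?rev=..." suffix off the Destination header. A
standalone sketch of just the splitting step, keeping the revision as a plain
string instead of calling couch_doc:parse_rev/1 (the module name is
illustrative):

    -module(copy_destination_sketch).
    -export([split_destination/1]).

    %% "other-doc"           -> {<<"other-doc">>, undefined}
    %% "other-doc?rev=1-abc" -> {<<"other-doc">>, "1-abc"}
    split_destination(Destination) ->
        case string:tokens(Destination, "?") of
            [DocId] ->
                {list_to_binary(DocId), undefined};
            [DocId, RevQs] ->
                [_RevKey, Rev] = string:tokens(RevQs, "="),
                {list_to_binary(DocId), Rev}
        end.
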
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 8d40101..e0001da 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -12,7 +12,7 @@
 
 -module(chttpd_view).
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -export([
     handle_view_req/3,
@@ -35,10 +35,10 @@ multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
 
 
 stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
-    {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
-    Args1 = couch_mrview_util:set_view_type(Args0, ViewName, Views),
+    {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
+    Args1 = couch_views_util:set_view_type(Args0, ViewName, Views),
     ArgQueries = parse_queries(Req, Args1, Queries, fun(QueryArg) ->
-        couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+        couch_views_util:set_view_type(QueryArg, ViewName, Views)
     end),
     VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
     FirstChunk = "{\"results\":[",
@@ -54,9 +54,9 @@ stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
 
 
 paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
-    {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
+    {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
     ArgQueries = parse_queries(Req, Args0, Queries, fun(QueryArg) ->
-        couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+        couch_views_util:set_view_type(QueryArg, ViewName, Views)
     end),
     KeyFun = fun({Props}) ->
         {couch_util:get_value(id, Props), couch_util:get_value(key, Props)}
@@ -76,7 +76,7 @@ paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
 
 
 design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys) ->
-    Args = couch_mrview_http:parse_body_and_query(Req, Props, Keys),
+    Args = couch_views_http_util:parse_body_and_query(Req, Props, Keys),
     fabric_query_view(Db, Req, DDoc, ViewName, Args).
 
 design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
@@ -134,7 +134,7 @@ handle_view_req(#httpd{method='POST',
     path_parts=[_, _, _, _, ViewName, <<"queries">>]}=Req, Db, DDoc) ->
     chttpd:validate_ctype(Req, "application/json"),
     Props = couch_httpd:json_body_obj(Req),
-    case couch_mrview_util:get_view_queries(Props) of
+    case couch_views_util:get_view_queries(Props) of
         undefined ->
             throw({bad_request,
                 <<"POST body must include `queries` parameter.">>});
@@ -156,8 +156,8 @@ handle_view_req(#httpd{method='POST',
         path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
     chttpd:validate_ctype(Req, "application/json"),
     Props = couch_httpd:json_body_obj(Req),
-    assert_no_queries_param(couch_mrview_util:get_view_queries(Props)),
-    Keys = couch_mrview_util:get_view_keys(Props),
+    assert_no_queries_param(couch_views_util:get_view_queries(Props)),
+    Keys = couch_views_util:get_view_keys(Props),
     couch_stats:increment_counter([couchdb, httpd, view_reads]),
     design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys);
 
@@ -299,7 +299,7 @@ t_check_user_can_override_individual_query_type() ->
 
 setup_all() ->
     Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
-    meck:expect(couch_mrview_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
+    meck:expect(couch_views_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
     meck:expect(chttpd, start_delayed_json_response, 4, {ok, resp}),
     meck:expect(couch_views, query, 6, {ok, #vacc{}}),
     meck:expect(chttpd, send_delayed_chunk, 2, {ok, resp}),
@@ -314,7 +314,7 @@ setup() ->
     meck:reset([
         chttpd,
         couch_views,
-        couch_mrview_util
+        couch_views_util
     ]).
 
 
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
index cc1fb5d..2289089 100644
--- a/src/couch/include/couch_db.hrl
+++ b/src/couch/include/couch_db.hrl
@@ -13,18 +13,13 @@
 -define(LOCAL_DOC_PREFIX, "_local/").
 -define(DESIGN_DOC_PREFIX0, "_design").
 -define(DESIGN_DOC_PREFIX, "_design/").
--define(DEFAULT_COMPRESSION, snappy).
 
 -define(MIN_STR, <<"">>).
 -define(MAX_STR, <<255>>). % illegal utf string
 
--define(REWRITE_COUNT, couch_rewrite_count).
-
 -define(JSON_ENCODE(V), couch_util:json_encode(V)).
 -define(JSON_DECODE(V), couch_util:json_decode(V)).
 
--define(IS_OLD_RECORD(V, R), (tuple_size(V) /= tuple_size(R))).
-
 -define(b2l(V), binary_to_list(V)).
 -define(l2b(V), list_to_binary(V)).
 -define(i2b(V), couch_util:integer_to_boolean(V)).
@@ -39,7 +34,6 @@
 
 -define(SYSTEM_DATABASES, [
     <<"_dbs">>,
-    <<"_global_changes">>,
     <<"_metadata">>,
     <<"_nodes">>,
     <<"_replicator">>,
@@ -128,18 +122,6 @@
     handler
 }).
 
--record(view_fold_helper_funs, {
-    reduce_count,
-    passed_end,
-    start_response,
-    send_row
-}).
-
--record(reduce_fold_helper_funs, {
-    start_response,
-    send_row
-}).
-
 -record(extern_resp_args, {
     code = 200,
     stop = false,
@@ -149,13 +131,6 @@
     json = nil
 }).
 
--record(index_header, {
-    seq=0,
-    purge_seq=0,
-    id_btree_state=nil,
-    view_states=nil
-}).
-
 % small value used in revision trees to indicate the revision isn't stored
 -define(REV_MISSING, []).
 
@@ -176,16 +151,6 @@
     db_open_options = []
 }).
 
--record(btree, {
-    fd,
-    root,
-    extract_kv,
-    assemble_kv,
-    less,
-    reduce = nil,
-    compression = ?DEFAULT_COMPRESSION
-}).
-
 -record(proc, {
     pid,
     lang,
@@ -204,15 +169,6 @@
     atts = []
 }).
 
--record (fabric_changes_acc, {
-    db,
-    seq,
-    args,
-    options,
-    pending,
-    epochs
-}).
-
 -type doc() :: #doc{}.
 -type ddoc() :: #doc{}.
 -type user_ctx() :: #user_ctx{}.
diff --git a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
index 49d6cd8..08195ac 100644
--- a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
+++ b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
@@ -68,7 +68,7 @@ static __inline UCollator* get_collator();
 
 /* Should match the <<255,255,255,255>> in:
  *  - src/mango/src/mango_idx_view.hrl#L13
- *  - src/couch_mrview/src/couch_mrview_util.erl#L40 */
+ *  - src/couch_views/src/couch_views_validate.erl */
 static const unsigned char max_utf8_marker[]  = {255, 255, 255, 255};
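
The <<255,255,255,255>> marker the updated comment points at has to sort after
every valid UTF-8 key, and since 0xFF can never appear in UTF-8, plain
byte-wise comparison already places it last. A quick Erlang illustration using
raw term order (not the ICU collation this NIF implements; the module name is
illustrative):

    -module(max_key_sketch).
    -export([sorts_last/1]).

    %% True when the sentinel compares greater than every supplied key
    %% under Erlang's byte-wise binary ordering.
    sorts_last(Utf8Keys) ->
        Max = <<255, 255, 255, 255>>,
        lists:all(fun(Key) -> Key < Max end, Utf8Keys).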
 
 
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 6116c79..171ee77 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -14,15 +14,12 @@
     {description, "Apache CouchDB"},
     {vsn, git},
     {registered, [
-        couch_db_update,
-        couch_db_update_notifier_sup,
         couch_httpd,
         couch_primary_services,
         couch_proc_manager,
         couch_secondary_services,
         couch_server,
-        couch_sup,
-        couch_task_status
+        couch_sup
     ]},
     {mod, {couch_app, []}},
     {applications, [
@@ -42,43 +39,7 @@
         couch_epi,
         b64url,
         couch_log,
-        couch_event,
-        ioq,
         couch_stats,
         hyper
-    ]},
-    {env, [
-        { httpd_global_handlers, [
-            {"/", "{couch_httpd_misc_handlers, handle_welcome_req, <<\"Welcome\">>}"},
-            {"favicon.ico", "{couch_httpd_misc_handlers, handle_favicon_req, \"{{prefix}}/share/www\"}"},
-            {"_utils", "{couch_httpd_misc_handlers, handle_utils_dir_req, \"{{prefix}}/share/www\"}"},
-            {"_all_dbs", "{couch_httpd_misc_handlers, handle_all_dbs_req}"},
-            {"_active_tasks", "{couch_httpd_misc_handlers, handle_task_status_req}"},
-            {"_config", "{couch_httpd_misc_handlers, handle_config_req}"},
-            {"_replicate", "{couch_replicator_httpd, handle_req}"},
-            {"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
-            {"_stats", "{couch_stats_httpd, handle_stats_req}"},
-            {"_session", "{couch_httpd_auth, handle_session_req}"},
-            {"_plugins", "{couch_plugins_httpd, handle_req}"}
-        ]},
-          { httpd_db_handlers, [
-            {"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
-            {"_local_docs", "{couch_mrview_http, handle_local_docs_req}"},
-            {"_design_docs", "{couch_mrview_http, handle_design_docs_req}"},
-            {"_changes", "{couch_httpd_db, handle_db_changes_req}"},
-            {"_compact", "{couch_httpd_db, handle_compact_req}"},
-            {"_design", "{couch_httpd_db, handle_design_req}"},
-            {"_temp_view", "{couch_mrview_http, handle_temp_view_req}"},
-            {"_view_cleanup", "{couch_mrview_http, handle_cleanup_req}"}
-        ]},
-        { httpd_design_handlers, [
-            {"_compact", "{couch_mrview_http, handle_compact_req}"},
-            {"_info", "{couch_mrview_http, handle_info_req}"},
-            {"_list", "{couch_mrview_show, handle_view_list_req}"},
-            {"_rewrite", "{couch_httpd_rewrite, handle_rewrite_req}"},
-            {"_show", "{couch_mrview_show, handle_doc_show_req}"},
-            {"_update", "{couch_mrview_show, handle_doc_update_req}"},
-            {"_view", "{couch_mrview_http, handle_view_req}"}
-        ]}
     ]}
 ]}.
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index b4c95e9..9009b52 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -690,192 +690,3 @@ validate_attachment_size(AttName, AttSize, MaxAttSize)
     throw({request_entity_too_large, {attachment, AttName}});
 validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
     ok.
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% % Eww...
-%% -include("couch_bt_engine.hrl").
-%%
-%% %% Test utilities
-%%
-%%
-%% empty_att() -> new().
-%%
-%%
-%% upgraded_empty_att() ->
-%%     new([{headers, undefined}]).
-%%
-%%
-%% %% Test groups
-%%
-%%
-%% attachment_upgrade_test_() ->
-%%     {"Lazy record upgrade tests", [
-%%         {"Existing record fields don't upgrade",
-%%             {with, empty_att(), [fun test_non_upgrading_fields/1]}
-%%         },
-%%         {"New fields upgrade",
-%%             {with, empty_att(), [fun test_upgrading_fields/1]}
-%%         }
-%%     ]}.
-%%
-%%
-%% attachment_defaults_test_() ->
-%%     {"Attachment defaults tests", [
-%%         {"Records retain old default values", [
-%%             {with, empty_att(), [fun test_legacy_defaults/1]}
-%%         ]},
-%%         {"Upgraded records inherit defaults", [
-%%             {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
-%%         ]},
-%%         {"Undefined entries are elided on upgrade", [
-%%             {with, upgraded_empty_att(), [fun test_elided_entries/1]}
-%%         ]}
-%%     ]}.
-%%
-%% attachment_field_api_test_() ->
-%%     {"Basic attachment field api", [
-%%         fun test_construction/0,
-%%         fun test_store_and_fetch/0,
-%%         fun test_transform/0
-%%     ]}.
-%%
-%%
-%% attachment_disk_term_test_() ->
-%%     BaseAttachment = new([
-%%         {name, <<"empty">>},
-%%         {type, <<"application/octet-stream">>},
-%%         {att_len, 0},
-%%         {disk_len, 0},
-%%         {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
-%%         {revpos, 4},
-%%         {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
-%%         {encoding, identity}
-%%     ]),
-%%     BaseDiskTerm = {
-%%         <<"empty">>,
-%%         <<"application/octet-stream">>,
-%%         fake_sp,
-%%         0, 0, 4,
-%%         <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
-%%         identity
-%%     },
-%%     Headers = [{<<"X-Foo">>, <<"bar">>}],
-%%     ExtendedAttachment = store(headers, Headers, BaseAttachment),
-%%     ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
-%%     FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
-%%     {"Disk term tests", [
-%%         ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
-%%         ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
-%%         ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
-%%         ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
-%%     ]}.
-%%
-%%
-%% attachment_json_term_test_() ->
-%%     Props = [
-%%         {<<"content_type">>, <<"application/json">>},
-%%         {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
-%%         {<<"length">>, 14},
-%%         {<<"revpos">>, 1}
-%%     ],
-%%     PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
-%%     InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
-%%     Att = couch_att:new([
-%%         {name, <<"attachment.json">>},
-%%         {type, <<"application/json">>}
-%%     ]),
-%%     ResultStub = couch_att:new([
-%%         {name, <<"attachment.json">>},
-%%         {type, <<"application/json">>},
-%%         {att_len, 14},
-%%         {disk_len, 14},
-%%         {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
-%%         {revpos, 1},
-%%         {data, stub},
-%%         {encoding, identity}
-%%     ]),
-%%     ResultFollows = ResultStub#att{data = follows},
-%%     ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
-%%     {"JSON term tests", [
-%%         ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
-%%         ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
-%%         ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
-%%         ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
-%%         ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
-%%     ]}.
-%%
-%%
-%% attachment_stub_merge_test_() ->
-%%     %% Stub merging needs to demonstrate revpos matching, skipping, and missing
-%%     %% attachment errors.
-%%     {"Attachment stub merging tests", []}.
-%%
-%%
-%% %% Test generators
-%%
-%%
-%% test_non_upgrading_fields(Attachment) ->
-%%     Pairs = [
-%%         {name, "cat.gif"},
-%%         {type, "text/very-very-plain"},
-%%         {att_len, 1024},
-%%         {disk_len, 42},
-%%         {md5, <<"md5-hashhashhash">>},
-%%         {revpos, 4},
-%%         {data, stub},
-%%         {encoding, gzip}
-%%     ],
-%%     lists:foreach(
-%%         fun({Field, Value}) ->
-%%             ?assertMatch(#att{}, Attachment),
-%%             Updated = store(Field, Value, Attachment),
-%%             ?assertMatch(#att{}, Updated)
-%%         end,
-%%     Pairs).
-%%
-%%
-%% test_upgrading_fields(Attachment) ->
-%%     ?assertMatch(#att{}, Attachment),
-%%     UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
-%%     ?assertMatch(X when is_list(X), UpdatedHeaders),
-%%     UpdatedHeadersUndefined = store(headers, undefined, Attachment),
-%%     ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
-%%
-%%
-%% test_legacy_defaults(Attachment) ->
-%%     ?assertEqual(<<>>, fetch(md5, Attachment)),
-%%     ?assertEqual(0, fetch(revpos, Attachment)),
-%%     ?assertEqual(identity, fetch(encoding, Attachment)).
-%%
-%%
-%% test_elided_entries(Attachment) ->
-%%     ?assertNot(lists:keymember(name, 1, Attachment)),
-%%     ?assertNot(lists:keymember(type, 1, Attachment)),
-%%     ?assertNot(lists:keymember(att_len, 1, Attachment)),
-%%     ?assertNot(lists:keymember(disk_len, 1, Attachment)),
-%%     ?assertNot(lists:keymember(data, 1, Attachment)).
-%%
-%%
-%% test_construction() ->
-%%     ?assert(new() == new()),
-%%     Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
-%%     ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
-%%     ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
-%%
-%%
-%% test_store_and_fetch() ->
-%%     Attachment = empty_att(),
-%%     ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
-%%     ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
-%%
-%%
-%% test_transform() ->
-%%     Attachment = new([{counter, 0}]),
-%%     Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
-%%     ?assertEqual(1, fetch(counter, Transformed)).
-%%
-%%
-%% -endif.
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
deleted file mode 100644
index 48e751a..0000000
--- a/src/couch/src/couch_bt_engine.erl
+++ /dev/null
@@ -1,1246 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine).
--behavior(couch_db_engine).
-
--export([
-    exists/1,
-
-    delete/3,
-    delete_compaction_files/3,
-
-    init/2,
-    terminate/2,
-    handle_db_updater_call/2,
-    handle_db_updater_info/2,
-
-    incref/1,
-    decref/1,
-    monitored_by/1,
-
-    last_activity/1,
-
-    get_compacted_seq/1,
-    get_del_doc_count/1,
-    get_disk_version/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_props/1,
-    get_size_info/1,
-    get_partition_info/2,
-    get_update_seq/1,
-    get_uuid/1,
-
-    set_revs_limit/2,
-    set_purge_infos_limit/2,
-    set_security/2,
-    set_props/2,
-
-    set_update_seq/2,
-
-    open_docs/2,
-    open_local_docs/2,
-    read_doc_body/2,
-    load_purge_infos/2,
-
-    serialize_doc/2,
-    write_doc_body/2,
-    write_doc_infos/3,
-    purge_docs/3,
-    copy_purge_infos/2,
-
-    commit_data/1,
-
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_changes/5,
-    fold_purge_infos/5,
-    count_changes_since/2,
-
-    start_compaction/4,
-    finish_compaction/4
-]).
-
-
--export([
-    init_state/4
-]).
-
-
--export([
-    id_tree_split/1,
-    id_tree_join/2,
-    id_tree_reduce/2,
-
-    seq_tree_split/1,
-    seq_tree_join/2,
-    seq_tree_reduce/2,
-
-    local_tree_split/1,
-    local_tree_join/2,
-
-    purge_tree_split/1,
-    purge_tree_join/2,
-    purge_tree_reduce/2,
-    purge_seq_tree_split/1,
-    purge_seq_tree_join/2
-]).
-
-
-% Used by the compactor
--export([
-    update_header/2,
-    copy_security/2,
-    copy_props/2
-]).
-
-
--include_lib("kernel/include/file.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
-exists(FilePath) ->
-    case is_file(FilePath) of
-        true ->
-            true;
-        false ->
-            is_file(FilePath ++ ".compact")
-    end.
-
-
-delete(RootDir, FilePath, Async) ->
-    %% Delete any leftover compaction files. If we don't do this a
-    %% subsequent request for this DB will try to open them to use
-    %% as a recovery.
-    delete_compaction_files(RootDir, FilePath, [{context, compaction}]),
-
-    % Delete the actual database file
-    couch_file:delete(RootDir, FilePath, Async).
-
-
-delete_compaction_files(RootDir, FilePath, DelOpts) ->
-    lists:foreach(fun(Ext) ->
-        couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
-    end, [".compact", ".compact.data", ".compact.meta"]).
-
-
-init(FilePath, Options) ->
-    {ok, Fd} = open_db_file(FilePath, Options),
-    Header = case lists:member(create, Options) of
-        true ->
-            delete_compaction_files(FilePath),
-            Header0 = couch_bt_engine_header:new(),
-            Header1 = init_set_props(Fd, Header0, Options),
-            ok = couch_file:write_header(Fd, Header1),
-            Header1;
-        false ->
-            case couch_file:read_header(Fd) of
-                {ok, Header0} ->
-                    Header0;
-                no_valid_header ->
-                    delete_compaction_files(FilePath),
-                    Header0 =  couch_bt_engine_header:new(),
-                    ok = couch_file:write_header(Fd, Header0),
-                    Header0
-            end
-    end,
-    {ok, init_state(FilePath, Fd, Header, Options)}.
-
-
-terminate(_Reason, St) ->
-    % If the reason we died is because our fd disappeared
-    % then we don't need to try closing it again.
-    Ref = St#st.fd_monitor,
-    if Ref == closed -> ok; true ->
-        ok = couch_file:close(St#st.fd),
-        receive
-            {'DOWN', Ref, _,  _, _} ->
-                ok
-            after 500 ->
-                ok
-        end
-    end,
-    couch_util:shutdown_sync(St#st.fd),
-    ok.
-
-
-handle_db_updater_call(Msg, St) ->
-    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor=Ref} = St) ->
-    {stop, normal, St#st{fd=undefined, fd_monitor=closed}}.
-
-
-incref(St) ->
-    {ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}.
-
-
-decref(St) ->
-    true = erlang:demonitor(St#st.fd_monitor, [flush]),
-    ok.
-
-
-monitored_by(St) ->
-    case erlang:process_info(St#st.fd, monitored_by) of
-        {monitored_by, Pids} ->
-            lists:filter(fun is_pid/1, Pids);
-        _ ->
-            []
-    end.
-
-
-last_activity(#st{fd = Fd}) ->
-    couch_file:last_read(Fd).
-
-
-get_compacted_seq(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, compacted_seq).
-
-
-get_del_doc_count(#st{} = St) ->
-    {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
-    element(2, Reds).
-
-
-get_disk_version(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, disk_version).
-
-
-get_doc_count(#st{} = St) ->
-    {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
-    element(1, Reds).
-
-
-get_epochs(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, epochs).
-
-
-get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
-    Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
-        {stop, PurgeSeq}
-    end,
-    {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, [{dir, rev}]),
-    PurgeSeq.
-
-
-get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
-    Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
-        {stop, PurgeSeq}
-    end,
-    {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, []),
-    PurgeSeq.
-
-
-get_purge_infos_limit(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, purge_infos_limit).
-
-
-get_revs_limit(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, revs_limit).
-
-
-get_size_info(#st{} = St) ->
-    {ok, FileSize} = couch_file:bytes(St#st.fd),
-    {ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree),
-    SizeInfo0 = element(3, DbReduction),
-    SizeInfo = case SizeInfo0 of
-        SI when is_record(SI, size_info) ->
-            SI;
-        {AS, ES} ->
-            #size_info{active=AS, external=ES};
-        AS ->
-            #size_info{active=AS}
-    end,
-    ActiveSize = active_size(St, SizeInfo),
-    ExternalSize = SizeInfo#size_info.external,
-    [
-        {active, ActiveSize},
-        {external, ExternalSize},
-        {file, FileSize}
-    ].
-
-
-partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, SizesAcc}) ->
-    case couch_partition:is_member(Key, Partition) of
-        true ->
-            {skip, {Partition, DC + DCAcc, DDC + DDCAcc, reduce_sizes(Sizes, SizesAcc)}};
-        false ->
-            {ok, {Partition, DCAcc, DDCAcc, SizesAcc}}
-    end;
-
-partition_size_cb(visit, FDI, _PrevReds, {Partition, DCAcc, DDCAcc, Acc}) ->
-    InPartition = couch_partition:is_member(FDI#full_doc_info.id, Partition),
-    Deleted = FDI#full_doc_info.deleted,
-    case {InPartition, Deleted} of
-        {true, true} ->
-            {ok, {Partition, DCAcc, DDCAcc + 1,
-                reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
-        {true, false} ->
-            {ok, {Partition, DCAcc + 1, DDCAcc,
-                reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
-        {false, _} ->
-            {ok, {Partition, DCAcc, DDCAcc, Acc}}
-    end.
-
-
-get_partition_info(#st{} = St, Partition) ->
-    StartKey = couch_partition:start_key(Partition),
-    EndKey = couch_partition:end_key(Partition),
-    Fun = fun partition_size_cb/4,
-    InitAcc = {Partition, 0, 0, #size_info{}},
-    Options = [{start_key, StartKey}, {end_key, EndKey}],
-    {ok, _, OutAcc} = couch_btree:fold(St#st.id_tree, Fun, InitAcc, Options),
-    {Partition, DocCount, DocDelCount, SizeInfo} = OutAcc,
-    [
-        {partition, Partition},
-        {doc_count, DocCount},
-        {doc_del_count, DocDelCount},
-        {sizes, [
-            {active, SizeInfo#size_info.active},
-            {external, SizeInfo#size_info.external}
-        ]}
-    ].
-
-
-get_security(#st{header = Header} = St) ->
-    case couch_bt_engine_header:get(Header, security_ptr) of
-        undefined ->
-            [];
-        Pointer ->
-            {ok, SecProps} = couch_file:pread_term(St#st.fd, Pointer),
-            SecProps
-    end.
-
-
-get_props(#st{header = Header} = St) ->
-    case couch_bt_engine_header:get(Header, props_ptr) of
-        undefined ->
-            [];
-        Pointer ->
-            {ok, Props} = couch_file:pread_term(St#st.fd, Pointer),
-            Props
-    end.
-
-
-get_update_seq(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, update_seq).
-
-
-get_uuid(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, uuid).
-
-
-set_revs_limit(#st{header = Header} = St, RevsLimit) ->
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {revs_limit, RevsLimit}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) ->
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {purge_infos_limit, PurgeInfosLimit}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_security(#st{header = Header} = St, NewSecurity) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options),
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {security_ptr, Ptr}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_props(#st{header = Header} = St, Props) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {props_ptr, Ptr}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-open_docs(#st{} = St, DocIds) ->
-    Results = couch_btree:lookup(St#st.id_tree, DocIds),
-    lists:map(fun
-        ({ok, FDI}) -> FDI;
-        (not_found) -> not_found
-    end, Results).
-
-
-open_local_docs(#st{} = St, DocIds) ->
-    Results = couch_btree:lookup(St#st.local_tree, DocIds),
-    lists:map(fun
-        ({ok, Doc}) -> Doc;
-        (not_found) -> not_found
-    end, Results).
-
-
-read_doc_body(#st{} = St, #doc{} = Doc) ->
-    {ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body),
-    Doc#doc{
-        body = Body,
-        atts = Atts
-    }.
-
-
-load_purge_infos(St, UUIDs) ->
-    Results = couch_btree:lookup(St#st.purge_tree, UUIDs),
-    lists:map(fun
-        ({ok, Info}) -> Info;
-        (not_found) -> not_found
-    end, Results).
-
-
-serialize_doc(#st{} = St, #doc{} = Doc) ->
-    Compress = fun(Term) ->
-        case couch_compress:is_compressed(Term, St#st.compression) of
-            true -> Term;
-            false -> couch_compress:compress(Term, St#st.compression)
-        end
-    end,
-    Body = Compress(Doc#doc.body),
-    Atts = Compress(Doc#doc.atts),
-    SummaryBin = ?term_to_bin({Body, Atts}),
-    Md5 = couch_hash:md5_hash(SummaryBin),
-    Data = couch_file:assemble_file_chunk(SummaryBin, Md5),
-    % TODO: This is a terrible hack to get around the issues
-    %       in COUCHDB-3255. We'll need to come back and figure
-    %       out a better approach to handling the case when we
-    %       need to generate a new revision id after the doc
-    %       has been serialized.
-    Doc#doc{
-        body = Data,
-        meta = [{comp_body, Body} | Doc#doc.meta]
-    }.
-
-
-write_doc_body(St, #doc{} = Doc) ->
-    #st{
-        fd = Fd
-    } = St,
-    {ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body),
-    {ok, Doc#doc{body = Ptr}, Written}.
-
-
-write_doc_infos(#st{} = St, Pairs, LocalDocs) ->
-    #st{
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        local_tree = LocalTree
-    } = St,
-    FinalAcc = lists:foldl(fun({OldFDI, NewFDI}, Acc) ->
-        {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
-        case {OldFDI, NewFDI} of
-            {not_found, #full_doc_info{}} ->
-                {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
-            {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
-                NewAddAcc = [NewFDI | AddAcc],
-                NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
-                {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
-            {#full_doc_info{id = Id}, not_found} ->
-                NewRemIdsAcc = [Id | RemIdsAcc],
-                NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
-                {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
-        end
-    end, {[], [], []}, Pairs),
-
-    {Add, RemIds, RemSeqs} = FinalAcc,
-    {ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds),
-    {ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs),
-
-    {AddLDocs, RemLDocIds} = lists:foldl(fun(Doc, {AddAcc, RemAcc}) ->
-        case Doc#doc.deleted of
-            true ->
-                {AddAcc, [Doc#doc.id | RemAcc]};
-            false ->
-                {[Doc | AddAcc], RemAcc}
-        end
-    end, {[], []}, LocalDocs),
-    {ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds),
-
-    NewUpdateSeq = lists:foldl(fun(#full_doc_info{update_seq=Seq}, Acc) ->
-        erlang:max(Seq, Acc)
-    end, get_update_seq(St), Add),
-
-    NewHeader = couch_bt_engine_header:set(St#st.header, [
-        {update_seq, NewUpdateSeq}
-    ]),
-
-    {ok, St#st{
-        header = NewHeader,
-        id_tree = IdTree2,
-        seq_tree = SeqTree2,
-        local_tree = LocalTree2,
-        needs_commit = true
-    }}.
-
-
-purge_docs(#st{} = St, Pairs, PurgeInfos) ->
-    #st{
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    } = St,
-
-    RemDocIds = [Old#full_doc_info.id || {Old, not_found} <- Pairs],
-    RemSeqs = [Old#full_doc_info.update_seq || {Old, _} <- Pairs],
-    DocsToAdd = [New || {_, New} <- Pairs, New /= not_found],
-    CurrSeq = couch_bt_engine_header:get(St#st.header, update_seq),
-    Seqs = [FDI#full_doc_info.update_seq || FDI <- DocsToAdd],
-    NewSeq = lists:max([CurrSeq | Seqs]),
-
-    % We bump NewUpdateSeq because we have to ensure that
-    % indexers see that they need to process the new purge
-    % information.
-    UpdateSeq = case NewSeq == CurrSeq of
-        true -> CurrSeq + 1;
-        false -> NewSeq
-    end,
-    Header = couch_bt_engine_header:set(St#st.header, [
-        {update_seq, UpdateSeq}
-    ]),
-
-    {ok, IdTree2} = couch_btree:add_remove(IdTree, DocsToAdd, RemDocIds),
-    {ok, SeqTree2} = couch_btree:add_remove(SeqTree, DocsToAdd, RemSeqs),
-    {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
-    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
-    {ok, St#st{
-        header = Header,
-        id_tree = IdTree2,
-        seq_tree = SeqTree2,
-        purge_tree = PurgeTree2,
-        purge_seq_tree = PurgeSeqTree2,
-        needs_commit = true
-    }}.
-
-
-copy_purge_infos(#st{} = St, PurgeInfos) ->
-    #st{
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    } = St,
-    {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
-    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
-    {ok, St#st{
-       purge_tree = PurgeTree2,
-       purge_seq_tree = PurgeSeqTree2,
-       needs_commit = true
-    }}.
-
-
-commit_data(St) ->
-    #st{
-        fd = Fd,
-        header = OldHeader,
-        needs_commit = NeedsCommit
-    } = St,
-
-    NewHeader = update_header(St, OldHeader),
-
-    case NewHeader /= OldHeader orelse NeedsCommit of
-        true ->
-            couch_file:sync(Fd),
-            ok = couch_file:write_header(Fd, NewHeader),
-            couch_file:sync(Fd),
-            {ok, St#st{
-                header = NewHeader,
-                needs_commit = false
-            }};
-        false ->
-            {ok, St}
-    end.
-
-
-open_write_stream(#st{} = St, Options) ->
-    couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options).
-
-
-open_read_stream(#st{} = St, StreamSt) ->
-    {ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}.
-
-
-is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) ->
-    St#st.fd == Fd;
-is_active_stream(_, _) ->
-    false.
-
-
-fold_docs(St, UserFun, UserAcc, Options) ->
-    fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options).
-
-
-fold_local_docs(St, UserFun, UserAcc, Options) ->
-    case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of
-        {ok, _Reds, FinalAcc} -> {ok, null, FinalAcc};
-        {ok, FinalAcc} -> {ok, FinalAcc}
-    end.
-
-
-fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
-    Fun = fun drop_reductions/4,
-    InAcc = {UserFun, UserAcc},
-    Opts = [{start_key, SinceSeq + 1}] ++ Options,
-    {ok, _, OutAcc} = couch_btree:fold(St#st.seq_tree, Fun, InAcc, Opts),
-    {_, FinalUserAcc} = OutAcc,
-    {ok, FinalUserAcc}.
-
-
-fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) ->
-    PurgeSeqTree = St#st.purge_seq_tree,
-    StartSeq = StartSeq0 + 1,
-    MinSeq = get_oldest_purge_seq(St),
-    if MinSeq =< StartSeq -> ok; true ->
-        erlang:error({invalid_start_purge_seq, StartSeq0})
-    end,
-    Wrapper = fun(Info, _Reds, UAcc) ->
-        UserFun(Info, UAcc)
-    end,
-    Opts = [{start_key, StartSeq}] ++ Options,
-    {ok, _, OutAcc} = couch_btree:fold(PurgeSeqTree, Wrapper, UserAcc, Opts),
-    {ok, OutAcc}.
-
-
-count_changes_since(St, SinceSeq) ->
-    BTree = St#st.seq_tree,
-    FoldFun = fun(_SeqStart, PartialReds, 0) ->
-        {ok, couch_btree:final_reduce(BTree, PartialReds)}
-    end,
-    Opts = [{start_key, SinceSeq + 1}],
-    {ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts),
-    Changes.
-
-
-start_compaction(St, DbName, Options, Parent) ->
-    Args = [St, DbName, Options, Parent],
-    Pid = spawn_link(couch_bt_engine_compactor, start, Args),
-    {ok, St, Pid}.
-
-
-finish_compaction(OldState, DbName, Options, CompactFilePath) ->
-    {ok, NewState1} = ?MODULE:init(CompactFilePath, Options),
-    OldSeq = get_update_seq(OldState),
-    NewSeq = get_update_seq(NewState1),
-    case OldSeq == NewSeq of
-        true ->
-            finish_compaction_int(OldState, NewState1);
-        false ->
-            couch_log:info("Compaction file still behind main file "
-                           "(update seq=~p. compact update seq=~p). Retrying.",
-                           [OldSeq, NewSeq]),
-            ok = decref(NewState1),
-            start_compaction(OldState, DbName, Options, self())
-    end.
-
-
-id_tree_split(#full_doc_info{}=Info) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = Deleted,
-        sizes = SizeInfo,
-        rev_tree = Tree
-    } = Info,
-    {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-id_tree_join(Id, {HighSeq, Deleted, DiskTree}) ->
-    % Handle old formats before data_size was added
-    id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
-
-id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = HighSeq,
-        deleted = ?i2b(Deleted),
-        sizes = couch_db_updater:upgrade_sizes(Sizes),
-        rev_tree = rev_tree(DiskTree)
-    }.
-
-
-id_tree_reduce(reduce, FullDocInfos) ->
-    lists:foldl(fun(Info, {NotDeleted, Deleted, Sizes}) ->
-        Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
-        case Info#full_doc_info.deleted of
-        true ->
-            {NotDeleted, Deleted + 1, Sizes2};
-        false ->
-            {NotDeleted + 1, Deleted, Sizes2}
-        end
-    end, {0, 0, #size_info{}}, FullDocInfos);
-id_tree_reduce(rereduce, Reds) ->
-    lists:foldl(fun
-        ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
-            % pre 1.2 format, will be upgraded on compaction
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
-        ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
-            AccSizes2 = reduce_sizes(AccSizes, Sizes),
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
-    end, {0, 0, #size_info{}}, Reds).
-
-
-seq_tree_split(#full_doc_info{}=Info) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = Del,
-        sizes = SizeInfo,
-        rev_tree = Tree
-    } = Info,
-    {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
-    seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree});
-
-seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = ?i2b(Del),
-        sizes = join_sizes(Sizes),
-        rev_tree = rev_tree(DiskTree)
-    };
-
-seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
-    % Older versions stored #doc_info records in the seq_tree.
-    % Compact to upgrade.
-    Revs = lists:map(fun({Rev, Seq, Bp}) ->
-        #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
-    end, RevInfos),
-    DeletedRevs = lists:map(fun({Rev, Seq, Bp}) ->
-        #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
-    end, DeletedRevInfos),
-    #doc_info{
-        id = Id,
-        high_seq = KeySeq,
-        revs = Revs ++ DeletedRevs
-    }.
-
-
-seq_tree_reduce(reduce, DocInfos) ->
-    % count the number of documents
-    length(DocInfos);
-seq_tree_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) ->
-    #doc{
-        id = Id,
-        body = BodyData
-    } = Doc,
-    {Id, {binary_to_integer(Rev), BodyData}};
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
-    #doc{
-        id = Id,
-        body = BodyData
-    } = Doc,
-    {Id, {Rev, BodyData}}.
-
-
-local_tree_join(Id, {Rev, BodyData}) when is_binary(Rev) ->
-    #doc{
-        id = Id,
-        revs = {0, [Rev]},
-        body = BodyData
-    };
-
-local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
-    #doc{
-        id = Id,
-        revs = {0, [integer_to_binary(Rev)]},
-        body = BodyData
-    }.
-
-
-purge_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
-    {UUID, {PurgeSeq, DocId, Revs}}.
-
-
-purge_tree_join(UUID, {PurgeSeq, DocId, Revs}) ->
-    {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_seq_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
-    {PurgeSeq, {UUID, DocId, Revs}}.
-
-
-purge_seq_tree_join(PurgeSeq, {UUID, DocId, Revs}) ->
-    {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_tree_reduce(reduce, IdRevs) ->
-    % count the number of purge requests
-    length(IdRevs);
-purge_tree_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-
-set_update_seq(#st{header = Header} = St, UpdateSeq) ->
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {update_seq, UpdateSeq}
-        ]),
-        needs_commit = true
-    }}.
-
-
-copy_security(#st{header = Header} = St, SecProps) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options),
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {security_ptr, Ptr}
-        ]),
-        needs_commit = true
-    }}.
-
-
-copy_props(#st{header = Header} = St, Props) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {props_ptr, Ptr}
-        ]),
-        needs_commit = true
-    }}.
-
-
-open_db_file(FilePath, Options) ->
-    case couch_file:open(FilePath, Options) of
-        {ok, Fd} ->
-            {ok, Fd};
-        {error, enoent} ->
-            % Couldn't find file. Is there a compact version? This can
-            % happen (rarely) if we crashed during the file switch.
-            case couch_file:open(FilePath ++ ".compact", [nologifmissing]) of
-                {ok, Fd} ->
-                    Fmt = "Recovering from compaction file: ~s~s",
-                    couch_log:info(Fmt, [FilePath, ".compact"]),
-                    ok = file:rename(FilePath ++ ".compact", FilePath),
-                    ok = couch_file:sync(Fd),
-                    {ok, Fd};
-                {error, enoent} ->
-                    throw({not_found, no_db_file})
-            end;
-        Error ->
-            throw(Error)
-    end.
-
-
-init_state(FilePath, Fd, Header0, Options) ->
-    ok = couch_file:sync(Fd),
-
-    Compression = couch_compress:get_compression_method(),
-
-    Header1 = couch_bt_engine_header:upgrade(Header0),
-    Header2 = set_default_security_object(Fd, Header1, Compression, Options),
-    Header = upgrade_purge_info(Fd, Header2),
-
-    IdTreeState = couch_bt_engine_header:id_tree_state(Header),
-    {ok, IdTree} = couch_btree:open(IdTreeState, Fd, [
-            {split, fun ?MODULE:id_tree_split/1},
-            {join, fun ?MODULE:id_tree_join/2},
-            {reduce, fun ?MODULE:id_tree_reduce/2},
-            {compression, Compression}
-        ]),
-
-    SeqTreeState = couch_bt_engine_header:seq_tree_state(Header),
-    {ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [
-            {split, fun ?MODULE:seq_tree_split/1},
-            {join, fun ?MODULE:seq_tree_join/2},
-            {reduce, fun ?MODULE:seq_tree_reduce/2},
-            {compression, Compression}
-        ]),
-
-    LocalTreeState = couch_bt_engine_header:local_tree_state(Header),
-    {ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [
-            {split, fun ?MODULE:local_tree_split/1},
-            {join, fun ?MODULE:local_tree_join/2},
-            {compression, Compression}
-        ]),
-
-    PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header),
-    {ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [
-        {split, fun ?MODULE:purge_tree_split/1},
-        {join, fun ?MODULE:purge_tree_join/2},
-        {reduce, fun ?MODULE:purge_tree_reduce/2}
-    ]),
-
-    PurgeSeqTreeState = couch_bt_engine_header:purge_seq_tree_state(Header),
-    {ok, PurgeSeqTree} = couch_btree:open(PurgeSeqTreeState, Fd, [
-        {split, fun ?MODULE:purge_seq_tree_split/1},
-        {join, fun ?MODULE:purge_seq_tree_join/2},
-        {reduce, fun ?MODULE:purge_tree_reduce/2}
-    ]),
-
-    ok = couch_file:set_db_pid(Fd, self()),
-
-    St = #st{
-        filepath = FilePath,
-        fd = Fd,
-        fd_monitor = erlang:monitor(process, Fd),
-        header = Header,
-        needs_commit = false,
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        local_tree = LocalTree,
-        compression = Compression,
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    },
-
-    % If this is a new database we've just created a
-    % new UUID and default security object which need
-    % to be written to disk.
-    case Header /= Header0 of
-        true ->
-            {ok, NewSt} = commit_data(St#st{needs_commit = true}),
-            NewSt;
-        false ->
-            St
-    end.
-
-
-update_header(St, Header) ->
-    couch_bt_engine_header:set(Header, [
-        {seq_tree_state, couch_btree:get_state(St#st.seq_tree)},
-        {id_tree_state, couch_btree:get_state(St#st.id_tree)},
-        {local_tree_state, couch_btree:get_state(St#st.local_tree)},
-        {purge_tree_state, couch_btree:get_state(St#st.purge_tree)},
-        {purge_seq_tree_state, couch_btree:get_state(St#st.purge_seq_tree)}
-    ]).
-
-
-increment_update_seq(#st{header = Header} = St) ->
-    UpdateSeq = couch_bt_engine_header:get(Header, update_seq),
-    St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {update_seq, UpdateSeq + 1}
-        ])
-    }.
-
-
-set_default_security_object(Fd, Header, Compression, Options) ->
-    case couch_bt_engine_header:get(Header, security_ptr) of
-        Pointer when is_integer(Pointer) ->
-            Header;
-        _ ->
-            Default = couch_util:get_value(default_security_object, Options),
-            AppendOpts = [{compression, Compression}],
-            {ok, Ptr, _} = couch_file:append_term(Fd, Default, AppendOpts),
-            couch_bt_engine_header:set(Header, security_ptr, Ptr)
-    end.
-
-
-% This function is here, and not in couch_bt_engine_header
-% because it requires modifying file contents
-upgrade_purge_info(Fd, Header) ->
-    case couch_bt_engine_header:get(Header, purge_tree_state) of
-        nil ->
-            Header;
-        Ptr when is_tuple(Ptr) ->
-            Header;
-        PurgeSeq when is_integer(PurgeSeq) ->
-            % Pointer to old purged ids/revs is in purge_seq_tree_state
-            Ptr = couch_bt_engine_header:get(Header, purge_seq_tree_state),
-
-            case Ptr of
-                nil ->
-                    PTS = couch_bt_engine_header:purge_tree_state(Header),
-                    PurgeTreeSt = case PTS of 0 -> nil; Else -> Else end,
-                    couch_bt_engine_header:set(Header, [
-                        {purge_tree_state, PurgeTreeSt}
-                    ]);
-                _ ->
-                    {ok, PurgedIdsRevs} = couch_file:pread_term(Fd, Ptr),
-
-                    {Infos, _} = lists:foldl(fun({Id, Revs}, {InfoAcc, PSeq}) ->
-                        Info = {PSeq, couch_uuids:random(), Id, Revs},
-                        {[Info | InfoAcc], PSeq + 1}
-                    end, {[], PurgeSeq}, PurgedIdsRevs),
-
-                    {ok, PurgeTree} = couch_btree:open(nil, Fd, [
-                        {split, fun ?MODULE:purge_tree_split/1},
-                        {join, fun ?MODULE:purge_tree_join/2},
-                        {reduce, fun ?MODULE:purge_tree_reduce/2}
-                    ]),
-                    {ok, PurgeTree2} = couch_btree:add(PurgeTree, Infos),
-                    PurgeTreeSt = couch_btree:get_state(PurgeTree2),
-
-                    {ok, PurgeSeqTree} = couch_btree:open(nil, Fd, [
-                        {split, fun ?MODULE:purge_seq_tree_split/1},
-                        {join, fun ?MODULE:purge_seq_tree_join/2},
-                        {reduce, fun ?MODULE:purge_tree_reduce/2}
-                    ]),
-                    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, Infos),
-                    PurgeSeqTreeSt = couch_btree:get_state(PurgeSeqTree2),
-
-                    couch_bt_engine_header:set(Header, [
-                        {purge_tree_state, PurgeTreeSt},
-                        {purge_seq_tree_state, PurgeSeqTreeSt}
-                    ])
-            end
-    end.
-
-
-init_set_props(Fd, Header, Options) ->
-    case couch_util:get_value(props, Options) of
-        undefined ->
-            Header;
-        InitialProps ->
-            Compression = couch_compress:get_compression_method(),
-            AppendOpts = [{compression, Compression}],
-            {ok, Ptr, _} = couch_file:append_term(Fd, InitialProps, AppendOpts),
-            couch_bt_engine_header:set(Header, props_ptr, Ptr)
-    end.
-
-
-delete_compaction_files(FilePath) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    DelOpts = [{context, compaction}],
-    delete_compaction_files(RootDir, FilePath, DelOpts).
-
-
-rev_tree(DiskTree) ->
-    couch_key_tree:map(fun
-        (_RevId, {Del, Ptr, Seq}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq
-            };
-        (_RevId, {Del, Ptr, Seq, Size}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq,
-                sizes = couch_db_updater:upgrade_sizes(Size)
-            };
-        (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq,
-                sizes = couch_db_updater:upgrade_sizes(Sizes),
-                atts = Atts
-            };
-        (_RevId, ?REV_MISSING) ->
-            ?REV_MISSING
-    end, DiskTree).
-
-
-disk_tree(RevTree) ->
-    couch_key_tree:map(fun
-        (_RevId, ?REV_MISSING) ->
-            ?REV_MISSING;
-        (_RevId, #leaf{} = Leaf) ->
-            #leaf{
-                deleted = Del,
-                ptr = Ptr,
-                seq = Seq,
-                sizes = Sizes,
-                atts = Atts
-            } = Leaf,
-            {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
-    end, RevTree).
-
-
-split_sizes(#size_info{}=SI) ->
-    {SI#size_info.active, SI#size_info.external}.
-
-
-join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
-    #size_info{active=Active, external=External}.
-
-
-reduce_sizes(nil, _) ->
-    nil;
-reduce_sizes(_, nil) ->
-    nil;
-reduce_sizes(#size_info{}=S1, #size_info{}=S2) ->
-    #size_info{
-        active = S1#size_info.active + S2#size_info.active,
-        external = S1#size_info.external + S2#size_info.external
-    };
-reduce_sizes(S1, S2) ->
-    US1 = couch_db_updater:upgrade_sizes(S1),
-    US2 = couch_db_updater:upgrade_sizes(S2),
-    reduce_sizes(US1, US2).
-
-
-active_size(#st{} = St, #size_info{} = SI) ->
-    Trees = [
-        St#st.id_tree,
-        St#st.seq_tree,
-        St#st.local_tree,
-        St#st.purge_tree,
-        St#st.purge_seq_tree
-    ],
-    lists:foldl(fun(T, Acc) ->
-        case couch_btree:size(T) of
-            _ when Acc == null ->
-                null;
-            nil ->
-                null;
-            Size ->
-                Acc + Size
-        end
-    end, SI#size_info.active, Trees).
-
-
-fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
-    Fun = case lists:member(include_deleted, Options) of
-        true -> fun include_deleted/4;
-        false -> fun skip_deleted/4
-    end,
-    RedFun = case lists:member(include_reductions, Options) of
-        true -> fun include_reductions/4;
-        false -> fun drop_reductions/4
-    end,
-    InAcc = {RedFun, {UserFun, UserAcc}},
-    {ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options),
-    {_, {_, FinalUserAcc}} = OutAcc,
-    case lists:member(include_reductions, Options) of
-        true when Tree == St#st.id_tree ->
-            {ok, fold_docs_reduce_to_count(Reds), FinalUserAcc};
-        true when Tree == St#st.local_tree ->
-            {ok, 0, FinalUserAcc};
-        false ->
-            {ok, FinalUserAcc}
-    end.
-
-
-include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}}.
-
-
-% First element of the reductions is the total
-% number of undeleted documents.
-skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) ->
-    {skip, Acc};
-skip_deleted(visit, #full_doc_info{deleted = true}, _, Acc) ->
-    {ok, Acc};
-skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}}.
-
-
-include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}};
-include_reductions(_, _, _, Acc) ->
-    {ok, Acc}.
-
-
-drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(FDI, UserAcc),
-    {Go, {UserFun, NewUserAcc}};
-drop_reductions(_, _, _, Acc) ->
-    {ok, Acc}.
-
-
-fold_docs_reduce_to_count(Reds) ->
-    RedFun = fun id_tree_reduce/2,
-    FinalRed = couch_btree:final_reduce(RedFun, Reds),
-    element(1, FinalRed).
-
-
-finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
-    #st{
-        filepath = FilePath,
-        local_tree = OldLocal
-    } = OldSt,
-    #st{
-        filepath = CompactDataPath,
-        header = Header,
-        local_tree = NewLocal1
-    } = NewSt1,
-
-    % suck up all the local docs into memory and write them to the new db
-    LoadFun = fun(Value, _Offset, Acc) ->
-        {ok, [Value | Acc]}
-    end,
-    {ok, _, LocalDocs} = couch_btree:foldl(OldLocal, LoadFun, []),
-    {ok, NewLocal2} = couch_btree:add(NewLocal1, LocalDocs),
-
-    {ok, NewSt2} = commit_data(NewSt1#st{
-        header = couch_bt_engine_header:set(Header, [
-            {compacted_seq, get_update_seq(OldSt)},
-            {revs_limit, get_revs_limit(OldSt)},
-            {purge_infos_limit, get_purge_infos_limit(OldSt)}
-        ]),
-        local_tree = NewLocal2
-    }),
-
-    % Rename our *.compact.data file to *.compact so that if we
-    % die between deleting the old file and renaming *.compact
-    % we can recover correctly.
-    ok = file:rename(CompactDataPath, FilePath ++ ".compact"),
-
-    % Remove the uncompacted database file
-    RootDir = config:get("couchdb", "database_dir", "."),
-    couch_file:delete(RootDir, FilePath),
-
-    % Move our compacted file into its final location
-    ok = file:rename(FilePath ++ ".compact", FilePath),
-
-    % Delete the old meta compaction file after promoting
-    % the compaction file.
-    couch_file:delete(RootDir, FilePath ++ ".compact.meta"),
-
-    % We're finished with our old state
-    decref(OldSt),
-
-    % And return our finished new state
-    {ok, NewSt2#st{
-        filepath = FilePath
-    }, undefined}.
-
-
-is_file(Path) ->
-    case file:read_file_info(Path, [raw]) of
-        {ok, #file_info{type = regular}} -> true;
-        {ok, #file_info{type = directory}} -> true;
-        _ -> false
-    end.
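
For context on the removal above: the btrees in couch_bt_engine store their
entries through paired split/join callbacks, so each *_split function and its
*_join counterpart are expected to round-trip a record. A minimal sketch of
that round trip using the id_tree callbacks deleted above (illustrative only,
not part of the removed file; FDI stands for any #full_doc_info{} record):

    %% hypothetical sketch, not from the removed module
    {Id, Val} = couch_bt_engine:id_tree_split(FDI),
    FDI2 = couch_bt_engine:id_tree_join(Id, Val),
    true = (FDI2#full_doc_info.id =:= FDI#full_doc_info.id).
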
diff --git a/src/couch/src/couch_bt_engine.hrl b/src/couch/src/couch_bt_engine.hrl
deleted file mode 100644
index e3c1d49..0000000
--- a/src/couch/src/couch_bt_engine.hrl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(st, {
-    filepath,
-    fd,
-    fd_monitor,
-    % deprecated but keeping it here to avoid altering the record size
-    fsync_options_deprecated,
-    header,
-    needs_commit,
-    id_tree,
-    seq_tree,
-    local_tree,
-    compression,
-    purge_tree,
-    purge_seq_tree
-}).
diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl
deleted file mode 100644
index 0b3fb22..0000000
--- a/src/couch/src/couch_bt_engine_compactor.erl
+++ /dev/null
@@ -1,590 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor).
-
-
--export([
-    start/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
--record(comp_header, {
-    db_header,
-    meta_state
-}).
-
--record(merge_st, {
-    id_tree,
-    seq_tree,
-    curr,
-    rem_seqs,
-    infos
-}).
-
-
-start(#st{} = St, DbName, Options, Parent) ->
-    erlang:put(io_priority, {db_compact, DbName}),
-    #st{
-        filepath = FilePath,
-        header = Header
-    } = St,
-    couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]),
-
-    couch_db_engine:trigger_on_compact(DbName),
-
-    {ok, NewSt, DName, DFd, MFd, Retry} =
-            open_compaction_files(Header, FilePath, Options),
-    erlang:monitor(process, MFd),
-
-    % This is a bit worrisome. init_db/4 will monitor the data fd
-    % but it doesn't know about the meta fd. For now I'll maintain
-    % that the data fd is the old normal fd and meta fd is special
-    % and hope everything works out for the best.
-    unlink(DFd),
-
-    NewSt1 = copy_purge_info(DbName, St, NewSt, Retry),
-    NewSt2 = copy_compact(DbName, St, NewSt1, Retry),
-    NewSt3 = sort_meta_data(NewSt2),
-    NewSt4 = commit_compaction_data(NewSt3),
-    NewSt5 = copy_meta_data(NewSt4),
-    {ok, NewSt6} = couch_bt_engine:commit_data(NewSt5),
-    ok = couch_bt_engine:decref(NewSt6),
-    ok = couch_file:close(MFd),
-
-    % Done
-    gen_server:cast(Parent, {compact_done, couch_bt_engine, DName}).
-
-
-open_compaction_files(SrcHdr, DbFilePath, Options) ->
-    DataFile = DbFilePath ++ ".compact.data",
-    MetaFile = DbFilePath ++ ".compact.meta",
-    {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
-    {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
-    DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr),
-    case {DataHdr, MetaHdr} of
-        {#comp_header{}=A, #comp_header{}=A} ->
-            DbHeader = A#comp_header.db_header,
-            St0 = couch_bt_engine:init_state(
-                    DataFile, DataFd, DbHeader, Options),
-            St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_state),
-            {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
-        _ when DataHdrIsDbHdr ->
-            Header = couch_bt_engine_header:from(SrcHdr),
-            ok = reset_compaction_file(MetaFd, Header),
-            St0 = couch_bt_engine:init_state(
-                    DataFile, DataFd, DataHdr, Options),
-            St1 = bind_emsort(St0, MetaFd, nil),
-            {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
-        _ ->
-            Header = couch_bt_engine_header:from(SrcHdr),
-            ok = reset_compaction_file(DataFd, Header),
-            ok = reset_compaction_file(MetaFd, Header),
-            St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
-            St1 = bind_emsort(St0, MetaFd, nil),
-            {ok, St1, DataFile, DataFd, MetaFd, nil}
-    end.
-
-
-copy_purge_info(DbName, OldSt, NewSt, Retry) ->
-    MinPurgeSeq = couch_util:with_db(DbName, fun(Db) ->
-        couch_db:get_minimum_purge_seq(Db)
-    end),
-    OldPSTree = OldSt#st.purge_seq_tree,
-    StartSeq = couch_bt_engine:get_purge_seq(NewSt) + 1,
-    BufferSize = config:get_integer(
-            "database_compaction", "doc_buffer_size", 524288),
-    CheckpointAfter = config:get(
-            "database_compaction", "checkpoint_after", BufferSize * 10),
-
-    EnumFun = fun(Info, _Reds, {StAcc0, InfosAcc, InfosSize, CopiedSize}) ->
-        NewInfosSize = InfosSize + ?term_size(Info),
-        if NewInfosSize >= BufferSize ->
-            StAcc1 = copy_purge_infos(
-                    OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry),
-            NewCopiedSize = CopiedSize + NewInfosSize,
-            if NewCopiedSize >= CheckpointAfter ->
-                StAcc2 = commit_compaction_data(StAcc1),
-                {ok, {StAcc2, [], 0, 0}};
-            true ->
-                {ok, {StAcc1, [], 0, NewCopiedSize}}
-            end;
-        true ->
-            NewInfosAcc = [Info | InfosAcc],
-            {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}}
-        end
-    end,
-
-    InitAcc = {NewSt, [], 0, 0},
-    Opts = [{start_key, StartSeq}],
-    {ok, _, FinalAcc} = couch_btree:fold(OldPSTree, EnumFun, InitAcc, Opts),
-    {NewStAcc, Infos, _, _} = FinalAcc,
-    copy_purge_infos(OldSt, NewStAcc, Infos, MinPurgeSeq, Retry).
-
-
-copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
-    #st{
-        id_tree = OldIdTree
-    } = OldSt,
-
-    % Re-bind our id_tree to the backing btree
-    NewIdTreeState = couch_bt_engine_header:id_tree_state(NewSt0#st.header),
-    MetaFd = couch_emsort:get_fd(NewSt0#st.id_tree),
-    MetaState = couch_emsort:get_state(NewSt0#st.id_tree),
-    NewSt1 = bind_id_tree(NewSt0, NewSt0#st.fd, NewIdTreeState),
-
-    #st{
-        id_tree = NewIdTree0,
-        seq_tree = NewSeqTree0,
-        purge_tree = NewPurgeTree0,
-        purge_seq_tree = NewPurgeSeqTree0
-    } = NewSt1,
-
-    % Copy over the purge infos
-    InfosToAdd = lists:filter(fun({PSeq, _, _, _}) ->
-        PSeq > MinPurgeSeq
-    end, Infos),
-    {ok, NewPurgeTree1} = couch_btree:add(NewPurgeTree0, InfosToAdd),
-    {ok, NewPurgeSeqTree1} = couch_btree:add(NewPurgeSeqTree0, InfosToAdd),
-
-    NewSt2 = NewSt1#st{
-        purge_tree = NewPurgeTree1,
-        purge_seq_tree = NewPurgeSeqTree1
-    },
-
-    % If we're performing a retry compaction we have to check if
-    % any of the referenced docs have been completely purged
-    % from the database. Any doc that has been completely purged
-    % must then be removed from our partially compacted database.
-    NewSt3 = if Retry == nil -> NewSt2; true ->
-        AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos],
-        UniqDocIds = lists:usort(AllDocIds),
-        OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds),
-        OldZipped = lists:zip(UniqDocIds, OldIdResults),
-
-        % The list of non-existent docs in the database being compacted
-        MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped],
-
-        % Removing anything that exists in the partially compacted database
-        NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds),
-        ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}],
-
-        {RemIds, RemSeqs} = lists:unzip(lists:map(fun(FDI) ->
-            #full_doc_info{
-                id = Id,
-                update_seq = Seq
-            } = FDI,
-            {Id, Seq}
-        end, ToRemove)),
-
-        {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds),
-        {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs),
-
-        NewSt2#st{
-            id_tree = NewIdTree1,
-            seq_tree = NewSeqTree1
-        }
-    end,
-
-    Header = couch_bt_engine:update_header(NewSt3, NewSt3#st.header),
-    NewSt4 = NewSt3#st{
-        header = Header
-    },
-    bind_emsort(NewSt4, MetaFd, MetaState).
-
-
-copy_compact(DbName, St, NewSt0, Retry) ->
-    Compression = couch_compress:get_compression_method(),
-    NewSt = NewSt0#st{compression = Compression},
-    NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0),
-    TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq),
-    BufferSize = list_to_integer(
-        config:get("database_compaction", "doc_buffer_size", "524288")),
-    CheckpointAfter = couch_util:to_integer(
-        config:get("database_compaction", "checkpoint_after",
-            BufferSize * 10)),
-
-    EnumBySeqFun =
-    fun(DocInfo, _Offset,
-            {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
-
-        Seq = case DocInfo of
-            #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
-            #doc_info{} -> DocInfo#doc_info.high_seq
-        end,
-
-        AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
-        if AccUncopiedSize2 >= BufferSize ->
-            NewSt2 = copy_docs(
-                St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry),
-            AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
-            if AccCopiedSize2 >= CheckpointAfter ->
-                {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
-                CommNewSt3 = commit_compaction_data(NewSt3),
-                {ok, {CommNewSt3, [], 0, 0}};
-            true ->
-                {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
-                {ok, {NewSt3, [], 0, AccCopiedSize2}}
-            end;
-        true ->
-            {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2,
-                AccCopiedSize}}
-        end
-    end,
-
-    TaskProps0 = [
-        {type, database_compaction},
-        {database, DbName},
-        {progress, 0},
-        {changes_done, 0},
-        {total_changes, TotalChanges}
-    ],
-    case (Retry =/= nil) and couch_task_status:is_task_added() of
-    true ->
-        couch_task_status:update([
-            {retry, true},
-            {progress, 0},
-            {changes_done, 0},
-            {total_changes, TotalChanges}
-        ]);
-    false ->
-        couch_task_status:add_task(TaskProps0),
-        couch_task_status:set_update_frequency(500)
-    end,
-
-    {ok, _, {NewSt2, Uncopied, _, _}} =
-        couch_btree:foldl(St#st.seq_tree, EnumBySeqFun,
-            {NewSt, [], 0, 0},
-            [{start_key, NewUpdateSeq + 1}]),
-
-    NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry),
-
-    % Copy the security information over
-    SecProps = couch_bt_engine:get_security(St),
-    {ok, NewSt4} = couch_bt_engine:copy_security(NewSt3, SecProps),
-
-    % Copy general properties over
-    Props = couch_bt_engine:get_props(St),
-    {ok, NewSt5} = couch_bt_engine:set_props(NewSt4, Props),
-
-    FinalUpdateSeq = couch_bt_engine:get_update_seq(St),
-    {ok, NewSt6} = couch_bt_engine:set_update_seq(NewSt5, FinalUpdateSeq),
-    commit_compaction_data(NewSt6).
-
-
-copy_docs(St, #st{} = NewSt, MixedInfos, Retry) ->
-    DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
-    LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds),
-    % COUCHDB-968, make sure we prune duplicates during compaction
-    NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
-        A =< B
-    end, merge_lookups(MixedInfos, LookupResults)),
-
-    NewInfos1 = lists:map(fun(Info) ->
-        {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun
-            ({RevPos, RevId}, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) ->
-                {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
-                #size_info{external = OldExternalSize} = Leaf#leaf.sizes,
-                ExternalSize = case OldExternalSize of
-                    0 when is_binary(Body) ->
-                        couch_compress:uncompressed_size(Body);
-                    0 ->
-                        couch_ejson_size:encoded_size(Body);
-                    N -> N
-                end,
-                Doc0 = #doc{
-                    id = Info#full_doc_info.id,
-                    revs = {RevPos, [RevId]},
-                    deleted = Leaf#leaf.deleted,
-                    body = Body,
-                    atts = AttInfos
-                },
-                Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
-                {ok, Doc2, ActiveSize} =
-                        couch_bt_engine:write_doc_body(NewSt, Doc1),
-                AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos],
-                NewLeaf = Leaf#leaf{
-                    ptr = Doc2#doc.body,
-                    sizes = #size_info{
-                        active = ActiveSize,
-                        external = ExternalSize
-                    },
-                    atts = AttSizes
-                },
-                {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
-            (_Rev, _Leaf, branch, SizesAcc) ->
-                {?REV_MISSING, SizesAcc}
-        end, {0, 0, []}, Info#full_doc_info.rev_tree),
-        {FinalAS, FinalES, FinalAtts} = FinalAcc,
-        TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
-        NewActiveSize = FinalAS + TotalAttSize,
-        NewExternalSize = FinalES + TotalAttSize,
-        Info#full_doc_info{
-            rev_tree = NewRevTree,
-            sizes = #size_info{
-                active = NewActiveSize,
-                external = NewExternalSize
-            }
-        }
-    end, NewInfos0),
-
-    Limit = couch_bt_engine:get_revs_limit(St),
-    NewInfos = lists:map(fun(FDI) ->
-        FDI#full_doc_info{
-            rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
-        }
-    end, NewInfos1),
-
-    RemoveSeqs =
-    case Retry of
-    nil ->
-        [];
-    OldDocIdTree ->
-        % Compaction is being rerun to catch up to writes during the
-        % first pass. This means we may have docs that already exist
-        % in the seq_tree in the .data file. Here we look up any old
-        % update_seqs so that they can be removed.
-        Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
-        Existing = couch_btree:lookup(OldDocIdTree, Ids),
-        [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
-    end,
-
-    {ok, SeqTree} = couch_btree:add_remove(
-            NewSt#st.seq_tree, NewInfos, RemoveSeqs),
-
-    FDIKVs = lists:map(fun(#full_doc_info{id=Id, update_seq=Seq}=FDI) ->
-        {{Id, Seq}, FDI}
-    end, NewInfos),
-    {ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, FDIKVs),
-    update_compact_task(length(NewInfos)),
-    NewSt#st{id_tree=IdEms, seq_tree=SeqTree}.
-
-
-copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) ->
-    {ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp),
-    BinInfos = case BinInfos0 of
-    _ when is_binary(BinInfos0) ->
-        couch_compress:decompress(BinInfos0);
-    _ when is_list(BinInfos0) ->
-        % pre 1.2 file format
-        BinInfos0
-    end,
-    % copy the bin values
-    NewBinInfos = lists:map(
-        fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
-            % 010 UPGRADE CODE
-            {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
-            {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
-            ok = couch_stream:copy(SrcStream, DstStream),
-            {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
-                couch_stream:close(DstStream),
-            {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-            couch_util:check_md5(ExpectedMd5, ActualMd5),
-            {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-        ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
-            {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
-            {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
-            ok = couch_stream:copy(SrcStream, DstStream),
-            {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
-                couch_stream:close(DstStream),
-            {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-            couch_util:check_md5(ExpectedMd5, ActualMd5),
-            Enc = case Enc1 of
-            true ->
-                % 0110 UPGRADE CODE
-                gzip;
-            false ->
-                % 0110 UPGRADE CODE
-                identity;
-            _ ->
-                Enc1
-            end,
-            {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
-        end, BinInfos),
-    {BodyData, NewBinInfos}.
-
-
-sort_meta_data(St0) ->
-    {ok, Ems} = couch_emsort:merge(St0#st.id_tree),
-    St0#st{id_tree=Ems}.
-
-
-copy_meta_data(#st{} = St) ->
-    #st{
-        fd = Fd,
-        header = Header,
-        id_tree = Src
-    } = St,
-    DstState = couch_bt_engine_header:id_tree_state(Header),
-    {ok, IdTree0} = couch_btree:open(DstState, Fd, [
-        {split, fun couch_bt_engine:id_tree_split/1},
-        {join, fun couch_bt_engine:id_tree_join/2},
-        {reduce, fun couch_bt_engine:id_tree_reduce/2}
-    ]),
-    {ok, Iter} = couch_emsort:iter(Src),
-    Acc0 = #merge_st{
-        id_tree=IdTree0,
-        seq_tree=St#st.seq_tree,
-        rem_seqs=[],
-        infos=[]
-    },
-    Acc = merge_docids(Iter, Acc0),
-    {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Acc#merge_st.infos),
-    {ok, SeqTree} = couch_btree:add_remove(
-        Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
-    ),
-    St#st{id_tree=IdTree, seq_tree=SeqTree}.
-
-
-open_compaction_file(FilePath) ->
-    case couch_file:open(FilePath, [nologifmissing]) of
-        {ok, Fd} ->
-            case couch_file:read_header(Fd) of
-                {ok, Header} -> {ok, Fd, Header};
-                no_valid_header -> {ok, Fd, nil}
-            end;
-        {error, enoent} ->
-            {ok, Fd} = couch_file:open(FilePath, [create]),
-            {ok, Fd, nil}
-    end.
-
-
-reset_compaction_file(Fd, Header) ->
-    ok = couch_file:truncate(Fd, 0),
-    ok = couch_file:write_header(Fd, Header).
-
-
-commit_compaction_data(#st{}=St) ->
-    % Compaction needs to write headers to both the data file
-    % and the meta file so if we need to restart we can pick
-    % back up from where we left off.
-    commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)),
-    commit_compaction_data(St, St#st.fd).
-
-
-commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
-    DataState = couch_bt_engine_header:id_tree_state(OldHeader),
-    MetaFd = couch_emsort:get_fd(St0#st.id_tree),
-    MetaState = couch_emsort:get_state(St0#st.id_tree),
-    St1 = bind_id_tree(St0, St0#st.fd, DataState),
-    Header = couch_bt_engine:update_header(St1, St1#st.header),
-    CompHeader = #comp_header{
-        db_header = Header,
-        meta_state = MetaState
-    },
-    ok = couch_file:sync(Fd),
-    ok = couch_file:write_header(Fd, CompHeader),
-    St2 = St1#st{
-        header = Header
-    },
-    bind_emsort(St2, MetaFd, MetaState).
-
-
-bind_emsort(St, Fd, nil) ->
-    {ok, Ems} = couch_emsort:open(Fd),
-    St#st{id_tree=Ems};
-bind_emsort(St, Fd, State) ->
-    {ok, Ems} = couch_emsort:open(Fd, [{root, State}]),
-    St#st{id_tree=Ems}.
-
-
-bind_id_tree(St, Fd, State) ->
-    {ok, IdBtree} = couch_btree:open(State, Fd, [
-        {split, fun couch_bt_engine:id_tree_split/1},
-        {join, fun couch_bt_engine:id_tree_join/2},
-        {reduce, fun couch_bt_engine:id_tree_reduce/2}
-    ]),
-    St#st{id_tree=IdBtree}.
-
-
-merge_lookups(Infos, []) ->
-    Infos;
-merge_lookups([], _) ->
-    [];
-merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
-    % Assert we've matched our lookups
-    if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
-        erlang:error({mismatched_doc_infos, DI#doc_info.id})
-    end,
-    [FDI | merge_lookups(RestInfos, RestLookups)];
-merge_lookups([FDI | RestInfos], Lookups) ->
-    [FDI | merge_lookups(RestInfos, Lookups)].
-
-
-merge_docids(Iter, #merge_st{infos=Infos}=Acc) when length(Infos) > 1000 ->
-    #merge_st{
-        id_tree=IdTree0,
-        seq_tree=SeqTree0,
-        rem_seqs=RemSeqs
-    } = Acc,
-    {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
-    {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
-    Acc1 = Acc#merge_st{
-        id_tree=IdTree1,
-        seq_tree=SeqTree1,
-        rem_seqs=[],
-        infos=[]
-    },
-    merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
-    case next_info(Iter, Curr, []) of
-        {NextIter, NewCurr, FDI, Seqs} ->
-            Acc1 = Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = NewCurr
-            },
-            merge_docids(NextIter, Acc1);
-        {finished, FDI, Seqs} ->
-            Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = undefined
-            };
-        empty ->
-            Acc
-    end.
-
-
-next_info(Iter, undefined, []) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, Seq}, FDI}, NextIter} ->
-            next_info(NextIter, {Id, Seq, FDI}, []);
-        finished ->
-            empty
-    end;
-next_info(Iter, {Id, Seq, FDI}, Seqs) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, NSeq}, NFDI}, NextIter} ->
-            next_info(NextIter, {Id, NSeq, NFDI}, [Seq | Seqs]);
-        {ok, {{NId, NSeq}, NFDI}, NextIter} ->
-            {NextIter, {NId, NSeq, NFDI}, FDI, Seqs};
-        finished ->
-            {finished, FDI, Seqs}
-    end.
-
-
-update_compact_task(NumChanges) ->
-    [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
-    Changes2 = Changes + NumChanges,
-    Progress = case Total of
-    0 ->
-        0;
-    _ ->
-        (Changes2 * 100) div Total
-    end,
-    couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
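
For reference, the batching thresholds the removed compactor reads from config,
with the defaults taken directly from the code above (a sketch to make the
buffer and checkpoint sizes explicit, not a complete snippet):

    %% mirrors the defaults used by the removed couch_bt_engine_compactor
    BufferSize = config:get_integer("database_compaction", "doc_buffer_size", 524288),
    CheckpointAfter = config:get("database_compaction", "checkpoint_after", BufferSize * 10),
    %% copied docs are flushed every BufferSize bytes (~512 KiB) and a restart
    %% checkpoint header is written roughly every CheckpointAfter bytes (~5 MiB)
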
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
deleted file mode 100644
index 3f9f518..0000000
--- a/src/couch/src/couch_bt_engine_header.erl
+++ /dev/null
@@ -1,451 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_header).
-
-
--export([
-    new/0,
-    from/1,
-    is_header/1,
-    upgrade/1,
-    get/2,
-    get/3,
-    set/2,
-    set/3
-]).
-
--export([
-    disk_version/1,
-    latest_disk_version/0,
-    update_seq/1,
-    id_tree_state/1,
-    seq_tree_state/1,
-    latest/1,
-    local_tree_state/1,
-    purge_tree_state/1,
-    purge_seq_tree_state/1,
-    purge_infos_limit/1,
-    security_ptr/1,
-    revs_limit/1,
-    uuid/1,
-    epochs/1,
-    compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 8).
-
--record(db_header, {
-    disk_version = ?LATEST_DISK_VERSION,
-    update_seq = 0,
-    unused = 0,
-    id_tree_state = nil,
-    seq_tree_state = nil,
-    local_tree_state = nil,
-    purge_tree_state = nil,
-    purge_seq_tree_state = nil, % purge_seq_tree: purge_seq -> {uuid, docid, revs}
-    security_ptr = nil,
-    revs_limit = 1000,
-    uuid,
-    epochs,
-    compacted_seq,
-    purge_infos_limit = 1000,
-    props_ptr
-}).
-
-
--define(PARTITION_DISK_VERSION, 8).
-
-
-new() ->
-    #db_header{
-        uuid = couch_uuids:random(),
-        epochs = [{node(), 0}]
-    }.
-
-
-from(Header0) ->
-    Header = upgrade(Header0),
-    #db_header{
-        uuid = Header#db_header.uuid,
-        epochs = Header#db_header.epochs,
-        compacted_seq = Header#db_header.compacted_seq
-    }.
-
-
-is_header(Header) ->
-    try
-        upgrade(Header),
-        true
-    catch _:_ ->
-        false
-    end.
-
-
-upgrade(Header) ->
-    Funs = [
-        fun upgrade_tuple/1,
-        fun upgrade_disk_version/1,
-        fun upgrade_uuid/1,
-        fun upgrade_epochs/1,
-        fun upgrade_compacted_seq/1
-    ],
-    lists:foldl(fun(F, HdrAcc) ->
-        F(HdrAcc)
-    end, Header, Funs).
-
-
-get(Header, Key) ->
-    ?MODULE:get(Header, Key, undefined).
-
-
-get(Header, Key, Default) ->
-    get_field(Header, Key, Default).
-
-
-set(Header, Key, Value) ->
-    ?MODULE:set(Header, [{Key, Value}]).
-
-
-set(Header0, Fields) ->
-    % A subtlety here is that if a database was open during the
-    % release upgrade that introduced uuids and epochs, then this
-    % dynamic upgrade also assigns a uuid and epoch.
-    Header = upgrade(Header0),
-    lists:foldl(fun({Field, Value}, HdrAcc) ->
-        set_field(HdrAcc, Field, Value)
-    end, Header, Fields).
-
-
-disk_version(Header) ->
-    get_field(Header, disk_version).
-
-
-latest_disk_version() ->
-    ?LATEST_DISK_VERSION.
-
-
-update_seq(Header) ->
-    get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
-    get_field(Header, id_tree_state).
-
-
-seq_tree_state(Header) ->
-    get_field(Header, seq_tree_state).
-
-
-local_tree_state(Header) ->
-    get_field(Header, local_tree_state).
-
-
-purge_tree_state(Header) ->
-    get_field(Header, purge_tree_state).
-
-
-purge_seq_tree_state(Header) ->
-    get_field(Header, purge_seq_tree_state).
-
-
-security_ptr(Header) ->
-    get_field(Header, security_ptr).
-
-
-revs_limit(Header) ->
-    get_field(Header, revs_limit).
-
-
-uuid(Header) ->
-    get_field(Header, uuid).
-
-
-epochs(Header) ->
-    get_field(Header, epochs).
-
-
-compacted_seq(Header) ->
-    get_field(Header, compacted_seq).
-
-
-purge_infos_limit(Header) ->
-    get_field(Header, purge_infos_limit).
-
-
-get_field(Header, Field) ->
-    get_field(Header, Field, undefined).
-
-
-get_field(Header, Field, Default) ->
-    Idx = index(Field),
-    case Idx > tuple_size(Header) of
-        true -> Default;
-        false -> element(index(Field), Header)
-    end.
-
-
-set_field(Header, Field, Value) ->
-    setelement(index(Field), Header, Value).
-
-
-index(Field) ->
-    couch_util:get_value(Field, indexes()).
-
-
-indexes() ->
-    Fields = record_info(fields, db_header),
-    Indexes = lists:seq(2, record_info(size, db_header)),
-    lists:zip(Fields, Indexes).
-
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
-    Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
-    NewSize = record_info(size, db_header),
-    if tuple_size(Old) < NewSize -> ok; true ->
-        erlang:error({invalid_header_size, Old})
-    end,
-    {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
-        {Idx+1, setelement(Idx, Hdr, Val)}
-    end, {1, #db_header{}}, tuple_to_list(Old)),
-    if is_record(New, db_header) -> ok; true ->
-        erlang:error({invalid_header_extension, {Old, New}})
-    end,
-    New.
-
--define(OLD_DISK_VERSION_ERROR,
-    "Database files from versions smaller than 0.10.0 are no longer supported").
-
-upgrade_disk_version(#db_header{}=Header) ->
-    case element(2, Header) of
-        1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
-        5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2
-        6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge
-        7 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre partitioned dbs
-        ?LATEST_DISK_VERSION -> Header;
-        _ ->
-            Reason = "Incorrect disk header version",
-            throw({database_disk_version_error, Reason})
-    end.
-
-
-upgrade_uuid(#db_header{}=Header) ->
-    case Header#db_header.uuid of
-        undefined ->
-            % Upgrading this old db file to a newer
-            % on-disk format that includes a UUID.
-            Header#db_header{uuid=couch_uuids:random()};
-        _ ->
-            Header
-    end.
-
-
-upgrade_epochs(#db_header{}=Header) ->
-    NewEpochs = case Header#db_header.epochs of
-        undefined ->
-            % This node is taking over ownership of a shard with
-            % an old version of the couch file. Before epochs there
-            % was always an implicit assumption that a file was
-            % owned since eternity by the node it was on. This
-            % just codifies that assumption.
-            [{node(), 0}];
-        [{Node, _} | _] = Epochs0 when Node == node() ->
-            % Current node is the current owner of this db
-            Epochs0;
-        Epochs1 ->
-            % This node is taking over ownership of this db
-            % and marking the update sequence where it happened.
-            [{node(), Header#db_header.update_seq} | Epochs1]
-    end,
-    % It's possible for a node to open a db and claim
-    % ownership but never make a write to the db. This
-    % removes nodes that claimed ownership but never
-    % changed the database.
-    DedupedEpochs = remove_dup_epochs(NewEpochs),
-    Header#db_header{epochs=DedupedEpochs}.
-
-
-% This relies slightly on the update_seqs in epochs being
-% sorted, since we only ever push entries onto the front.
-% If we ever had a case where the update_seq is not
-% monotonically increasing, it's not clear we'd want to
-% remove dupes (by sorting the input to this function), so
-% for now we don't sort and instead rely on the idea that
-% epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
-    % Seqs match, keep the most recent owner
-    [{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
-    % Seqs don't match.
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
-    % Seqs match, keep the most recent owner
-    remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
-    % Seqs don't match, recurse to check others
-    [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
-
-upgrade_compacted_seq(#db_header{}=Header) ->
-    case Header#db_header.compacted_seq of
-        undefined ->
-            Header#db_header{compacted_seq=0};
-        _ ->
-            Header
-    end.
-
-latest(?LATEST_DISK_VERSION) ->
-    true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
-    false;
-latest(_Else) ->
-    undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
-    {
-        db_header, % record name
-        Vsn, % disk version
-        100, % update_seq
-        0, % unused
-        foo, % id_tree_state
-        bar, % seq_tree_state
-        bam, % local_tree_state
-        flam, % was purge_seq - now purge_tree_state
-        baz, % was purged_docs - now purge_seq_tree_state
-        bang, % security_ptr
-        999 % revs_limit
-    }.
-
-
--ifdef(run_broken_tests).
-
-upgrade_v3_test() ->
-    Vsn3Header = mk_header(3),
-    NewHeader = upgrade_tuple(Vsn3Header),
-
-    % Tuple upgrades don't change
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(3, disk_version(NewHeader)),
-    ?assertEqual(100, update_seq(NewHeader)),
-    ?assertEqual(foo, id_tree_state(NewHeader)),
-    ?assertEqual(bar, seq_tree_state(NewHeader)),
-    ?assertEqual(bam, local_tree_state(NewHeader)),
-    ?assertEqual(flam, purge_tree_state(NewHeader)),
-    ?assertEqual(baz, purge_seq_tree_state(NewHeader)),
-    ?assertEqual(bang, security_ptr(NewHeader)),
-    ?assertEqual(999, revs_limit(NewHeader)),
-    ?assertEqual(undefined, uuid(NewHeader)),
-    ?assertEqual(undefined, epochs(NewHeader)),
-
-    % Security ptr isn't changed until upgrade_disk_version/1
-    NewNewHeader = upgrade_disk_version(NewHeader),
-    ?assert(is_record(NewNewHeader, db_header)),
-    ?assertEqual(nil, security_ptr(NewNewHeader)),
-
-    % Assert upgrade works on really old headers
-    NewestHeader = upgrade(Vsn3Header),
-    ?assertMatch(<<_:32/binary>>, uuid(NewestHeader)),
-    ?assertEqual([{node(), 0}], epochs(NewestHeader)).
-
--endif.
-
-upgrade_v5_to_v8_test() ->
-    Vsn5Header = mk_header(5),
-    NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(8, disk_version(NewHeader)),
-
-    % Security ptr isn't changed for v5 headers
-    ?assertEqual(bang, security_ptr(NewHeader)).
-
-
-upgrade_uuid_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a new UUID
-    NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
-    ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
-    % Headers with a UUID don't have their UUID changed
-    NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
-    ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
-    % Derived empty headers maintain the same UUID
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-
-upgrade_epochs_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a default epochs set
-    NewHeader = upgrade(Vsn5Header),
-    ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
-    % Fake an old entry in epochs
-    FakeFields = [
-        {update_seq, 20},
-        {epochs, [{'someothernode@someotherhost', 0}]}
-    ],
-    NotOwnedHeader = set(NewHeader, FakeFields),
-
-    OwnedEpochs = [
-        {node(), 20},
-        {'someothernode@someotherhost', 0}
-    ],
-
-    % Upgrading a header not owned by the local node updates
-    % the epochs appropriately.
-    NowOwnedHeader = upgrade(NotOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
-    % Headers with epochs stay the same after upgrades
-    NewNewHeader = upgrade(NowOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
-    % Getting a reset header maintains the epoch data
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-
-get_uuid_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, uuid(Vsn5Header)).
-
-
-get_epochs_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, epochs(Vsn5Header)).
-
-
--endif.
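A note on the field-access pattern in the removed couch_bt_engine_header module
above: fields are read by tuple position, with positions derived from
record_info/2, so a header written by an older release (and therefore shorter
than the current record) falls back to a default instead of crashing. A
minimal, self-contained sketch of that idea follows; the module and function
names (toy_header, new_v1/0) are illustrative only and are not part of the
tree.

    -module(toy_header).
    -export([new_v1/0, get_field/3]).

    -record(toy_header, {
        version = 2,
        update_seq = 0,
        uuid        % added in a later "disk version"; old tuples lack this slot
    }).

    %% An "old" header written before the uuid field existed: same record tag,
    %% but one element short of the current record size.
    new_v1() ->
        {toy_header, 1, 42}.

    %% Look a field up by name, tolerating tuples that predate the field.
    get_field(Header, Field, Default) ->
        Fields = record_info(fields, toy_header),
        Indexes = lists:seq(2, record_info(size, toy_header)),
        Idx = proplists:get_value(Field, lists:zip(Fields, Indexes)),
        case Idx > tuple_size(Header) of
            true -> Default;                 % field absent in this older header
            false -> element(Idx, Header)
        end.

For example, toy_header:get_field(toy_header:new_v1(), uuid, undefined)
returns undefined, while toy_header:get_field(toy_header:new_v1(),
update_seq, 0) returns 42.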
diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl
deleted file mode 100644
index 431894a..0000000
--- a/src/couch/src/couch_bt_engine_stream.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_stream).
-
--export([
-    foldl/3,
-    seek/2,
-    write/2,
-    finalize/1,
-    to_disk_term/1
-]).
-
-
-foldl({_Fd, []}, _Fun, Acc) ->
-    Acc;
-
-foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) ->
-    foldl({Fd, [Pos | Rest]}, Fun, Acc);
-
-foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) ->
-    % We're processing the first bit of data
-    % after we did a seek for a range fold.
-    foldl({Fd, Rest}, Fun, Fun(Bin, Acc));
-
-foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) ->
-    {ok, Bin} = couch_file:pread_binary(Fd, Pos),
-    foldl({Fd, Rest}, Fun, Fun(Bin, Acc)).
-
-
-seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
-    case Length =< Offset of
-        true ->
-            seek({Fd, Rest}, Offset - Length);
-        false ->
-            seek({Fd, [Pos | Rest]}, Offset)
-    end;
-
-seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
-    {ok, Bin} = couch_file:pread_binary(Fd, Pos),
-    case iolist_size(Bin) =< Offset of
-        true ->
-            seek({Fd, Rest}, Offset - size(Bin));
-        false ->
-            <<_:Offset/binary, Tail/binary>> = Bin,
-            {ok, {Fd, [Tail | Rest]}}
-    end.
-
-
-write({Fd, Written}, Data) when is_pid(Fd) ->
-    {ok, Pos, _} = couch_file:append_binary(Fd, Data),
-    {ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}.
-
-
-finalize({Fd, Written}) ->
-    {ok, {Fd, lists:reverse(Written)}}.
-
-
-to_disk_term({_Fd, Written}) ->
-    {ok, Written}.
-
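The removed couch_bt_engine_stream module above keeps an attachment stream as
{Fd, [Position | {Position, Length}]} and implements seek/2 by skipping whole
chunks while the remaining offset still covers them, then splitting the first
chunk that straddles the offset. Below is a small self-contained sketch of the
same skip-then-split logic, using in-memory binaries in place of couch_file
positions (toy_stream is a hypothetical name):

    -module(toy_stream).
    -export([seek/2, foldl/3]).

    %% Skip Offset bytes into a list of binary chunks, returning what remains.
    seek(Chunks, 0) ->
        Chunks;
    seek([Bin | Rest], Offset) when byte_size(Bin) =< Offset ->
        seek(Rest, Offset - byte_size(Bin));
    seek([Bin | Rest], Offset) ->
        <<_:Offset/binary, Tail/binary>> = Bin,
        [Tail | Rest].

    %% Fold over the remaining chunks in order.
    foldl(Chunks, Fun, Acc) ->
        lists:foldl(Fun, Acc, Chunks).

For example, seeking 7 bytes into [<<"hello">>, <<"world">>] leaves
[<<"rld">>], mirroring how the real module resumes a range fold from the
middle of a stored chunk.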
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
deleted file mode 100644
index ea0cf69..0000000
--- a/src/couch/src/couch_btree.erl
+++ /dev/null
@@ -1,855 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree).
-
--export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
--export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
--export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
--export([extract/2, assemble/3, less/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(FILL_RATIO, 0.5).
-
-extract(#btree{extract_kv=undefined}, Value) ->
-    Value;
-extract(#btree{extract_kv=Extract}, Value) ->
-    Extract(Value).
-
-assemble(#btree{assemble_kv=undefined}, Key, Value) ->
-    {Key, Value};
-assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
-    Assemble(Key, Value).
-
-less(#btree{less=undefined}, A, B) ->
-    A < B;
-less(#btree{less=Less}, A, B) ->
-    Less(A, B).
-
-% Pass in 'nil' for State if this is a new btree.
-open(State, Fd) ->
-    {ok, #btree{root=State, fd=Fd}}.
-
-set_options(Bt, []) ->
-    Bt;
-set_options(Bt, [{split, Extract}|Rest]) ->
-    set_options(Bt#btree{extract_kv=Extract}, Rest);
-set_options(Bt, [{join, Assemble}|Rest]) ->
-    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
-set_options(Bt, [{less, Less}|Rest]) ->
-    set_options(Bt#btree{less=Less}, Rest);
-set_options(Bt, [{reduce, Reduce}|Rest]) ->
-    set_options(Bt#btree{reduce=Reduce}, Rest);
-set_options(Bt, [{compression, Comp}|Rest]) ->
-    set_options(Bt#btree{compression=Comp}, Rest).
-
-open(State, Fd, Options) ->
-    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
-
-get_state(#btree{root=Root}) ->
-    Root.
-
-final_reduce(#btree{reduce=Reduce}, Val) ->
-    final_reduce(Reduce, Val);
-final_reduce(Reduce, {[], []}) ->
-    Reduce(reduce, []);
-final_reduce(_Bt, {[], [Red]}) ->
-    Red;
-final_reduce(Reduce, {[], Reductions}) ->
-    Reduce(rereduce, Reductions);
-final_reduce(Reduce, {KVs, Reductions}) ->
-    Red = Reduce(reduce, KVs),
-    final_reduce(Reduce, {[], [Red | Reductions]}).
-
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    StartKey = couch_util:get_value(start_key, Options),
-    InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
-    KeyGroupFun = get_group_fun(Bt, Options),
-    try
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
-            KeyGroupFun, Fun, Acc),
-        if GroupedKey2 == undefined ->
-            {ok, Acc2};
-        true ->
-            case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
-            {ok, Acc3} -> {ok, Acc3};
-            {stop, Acc3} -> {ok, Acc3}
-            end
-        end
-    catch
-        throw:{stop, AccDone} -> {ok, AccDone}
-    end.
-
-full_reduce(#btree{root=nil,reduce=Reduce}) ->
-    {ok, Reduce(reduce, [])};
-full_reduce(#btree{root=Root}) ->
-    {ok, element(2, Root)}.
-
-size(#btree{root = nil}) ->
-    0;
-size(#btree{root = {_P, _Red}}) ->
-    % pre 1.2 format
-    nil;
-size(#btree{root = {_P, _Red, Size}}) ->
-    Size.
-
-get_group_fun(Bt, Options) ->
-    case couch_util:get_value(key_group_level, Options) of
-        exact ->
-            make_group_fun(Bt, exact);
-        0 ->
-            fun(_, _) -> true end;
-        N when is_integer(N), N > 0 ->
-            make_group_fun(Bt, N);
-        undefined ->
-            couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end)
-    end.
-
-make_group_fun(Bt, exact) ->
-    fun({Key1, _}, {Key2, _}) ->
-        case less(Bt, {Key1, nil}, {Key2, nil}) of
-            false ->
-                case less(Bt, {Key2, nil}, {Key1, nil}) of
-                    false ->
-                        true;
-                    _ ->
-                        false
-                end;
-            _ ->
-                false
-        end
-    end;
-make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 ->
-    fun
-        GF({{p, Partition, Key1}, Val1}, {{p, Partition, Key2}, Val2}) ->
-            GF({Key1, Val1}, {Key2, Val2});
-        GF({[_|_] = Key1, _}, {[_|_] = Key2, _}) ->
-            SL1 = lists:sublist(Key1, GroupLevel),
-            SL2 = lists:sublist(Key2, GroupLevel),
-            case less(Bt, {SL1, nil}, {SL2, nil}) of
-                false ->
-                    case less(Bt, {SL2, nil}, {SL1, nil}) of
-                        false ->
-                            true;
-                        _ ->
-                            false
-                    end;
-                _ ->
-                    false
-            end;
-        GF({Key1, _}, {Key2, _}) ->
-            case less(Bt, {Key1, nil}, {Key2, nil}) of
-                false ->
-                    case less(Bt, {Key2, nil}, {Key1, nil}) of
-                        false ->
-                            true;
-                        _ ->
-                            false
-                    end;
-                _ ->
-                    false
-            end
-    end.
-
-% wraps a 2 or 3 arity function with the proper 4 arity function
-convert_fun_arity(Fun) when is_function(Fun, 2) ->
-    fun
-        (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 3) ->
-    fun
-        (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 4) ->
-    Fun.    % Already arity 4
-
-make_key_in_end_range_function(Bt, fwd, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, LastKey, Key) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, Key, EndKey) end
-    end;
-make_key_in_end_range_function(Bt, rev, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, Key, LastKey) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, EndKey, Key) end
-    end.
-
-
-foldl(Bt, Fun, Acc) ->
-    fold(Bt, Fun, Acc, []).
-
-foldl(Bt, Fun, Acc, Options) ->
-    fold(Bt, Fun, Acc, Options).
-
-
-fold(#btree{root=nil}, _Fun, Acc, _Options) ->
-    {ok, {[], []}, Acc};
-fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    InRange = make_key_in_end_range_function(Bt, Dir, Options),
-    Result =
-    case couch_util:get_value(start_key, Options) of
-    undefined ->
-        stream_node(Bt, [], Bt#btree.root, InRange, Dir,
-                convert_fun_arity(Fun), Acc);
-    StartKey ->
-        stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
-                convert_fun_arity(Fun), Acc)
-    end,
-    case Result of
-    {ok, Acc2}->
-        FullReduction = element(2, Root),
-        {ok, {[], [FullReduction]}, Acc2};
-    {stop, LastReduction, Acc2} ->
-        {ok, LastReduction, Acc2}
-    end.
-
-add(Bt, InsertKeyValues) ->
-    add_remove(Bt, InsertKeyValues, []).
-
-add_remove(Bt, InsertKeyValues, RemoveKeys) ->
-    {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
-    {ok, Bt2}.
-
-query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
-    #btree{root=Root} = Bt,
-    InsertActions = lists:map(
-        fun(KeyValue) ->
-            {Key, Value} = extract(Bt, KeyValue),
-            {insert, Key, Value}
-        end, InsertValues),
-    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
-    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
-    SortFun =
-        fun({OpA, A, _}, {OpB, B, _}) ->
-            case A == B of
-            % A and B are equal, sort by op.
-            true -> op_order(OpA) < op_order(OpB);
-            false ->
-                less(Bt, A, B)
-            end
-        end,
-    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
-    {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
-    {ok, NewRoot} = complete_root(Bt, KeyPointers),
-    {ok, QueryResults, Bt#btree{root=NewRoot}}.
-
-% for ordering different operations with the same key.
-% fetch < remove < insert
-op_order(fetch) -> 1;
-op_order(remove) -> 2;
-op_order(insert) -> 3.
-
-lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
-    SortedKeys = case Less of
-        undefined -> lists:sort(Keys);
-        _ -> lists:sort(Less, Keys)
-    end,
-    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
-    % We want to return the results in the same order as the keys were input
-    % but we may have changed the order when we sorted. So we need to put the
-    % order back into the results.
-    couch_util:reorder_results(Keys, SortedResults).
-
-lookup(_Bt, nil, Keys) ->
-    {ok, [{Key, not_found} || Key <- Keys]};
-lookup(Bt, Node, Keys) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
-    kv_node ->
-        lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
-    end.
-
-lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
-    {Key, PointerInfo} = element(N, NodeTuple),
-    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
-    case lists:splitwith(SplitFun, LookupKeys) of
-    {[], GreaterQueries} ->
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
-    {LessEqQueries, GreaterQueries} ->
-        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
-    end.
-
-
-lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    % keys not found
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
-    {Key, Value} = element(N, NodeTuple),
-    case less(Bt, LookupKey, Key) of
-    true ->
-        % LookupKey is less than Key
-        lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
-    false ->
-        case less(Bt, Key, LookupKey) of
-        true ->
-            % LookupKey is greater than Key
-            lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
-        false ->
-            % LookupKey is equal to Key
-            lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
-        end
-    end.
-
-
-complete_root(_Bt, []) ->
-    {ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}])->
-    {ok, PointerInfo};
-complete_root(Bt, KPs) ->
-    {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
-    complete_root(Bt, ResultKeyPointers).
-
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
-% It is inaccurate as it does not account for compression when blocks are
-% written. Plus, with the up-front ?term_size(InList) call, it's probably
-% really inefficient.
-
-chunkify(InList) ->
-    BaseChunkSize = get_chunk_size(),
-    case ?term_size(InList) of
-    Size when Size > BaseChunkSize ->
-        NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
-        ChunkThreshold = Size div NumberOfChunksLikely,
-        chunkify(InList, ChunkThreshold, [], 0, []);
-    _Else ->
-        [InList]
-    end.
-
-chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
-    lists:reverse(OutputChunks);
-chunkify([], _ChunkThreshold, [Item], _OutListSize, [PrevChunk | RestChunks]) ->
-    NewPrevChunk = PrevChunk ++ [Item],
-    lists:reverse(RestChunks, [NewPrevChunk]);
-chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
-    lists:reverse([lists:reverse(OutList) | OutputChunks]);
-chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
-    case ?term_size(InElement) of
-    Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
-        chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
-    Size ->
-        chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
-    end.
-
--compile({inline,[get_chunk_size/0]}).
-get_chunk_size() ->
-    try
-        list_to_integer(config:get("couchdb", "btree_chunk_size", "1279"))
-    catch error:badarg ->
-        1279
-    end.
-
-modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
-    {NodeType, NodeList} = case RootPointerInfo of
-    nil ->
-        {kv_node, []};
-    _Tuple ->
-        Pointer = element(1, RootPointerInfo),
-        get_node(Bt, Pointer)
-    end,
-    NodeTuple = list_to_tuple(NodeList),
-
-    {ok, NewNodeList, QueryOutput2} =
-    case NodeType of
-    kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
-    kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
-    end,
-    case NewNodeList of
-    [] ->  % no nodes remain
-        {ok, [], QueryOutput2};
-    NodeList ->  % nothing changed
-        {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-        {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
-    _Else2 ->
-        {ok, ResultList} = case RootPointerInfo of
-        nil ->
-            write_node(Bt, NodeType, NewNodeList);
-        _ ->
-            {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-            OldNode = {LastKey, RootPointerInfo},
-            write_node(Bt, OldNode, NodeType, NodeList, NewNodeList)
-        end,
-        {ok, ResultList, QueryOutput2}
-    end.
-
-reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
-    [];
-reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
-    R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
-    R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
-
-reduce_tree_size(kv_node, NodeSize, _KvList) ->
-    NodeSize;
-reduce_tree_size(kp_node, NodeSize, []) ->
-    NodeSize;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
-    % pre 1.2 format
-    nil;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
-    nil;
-reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
-    reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
-
-get_node(#btree{fd = Fd}, NodePos) ->
-    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
-    {NodeType, NodeList}.
-
-write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
-    % split up nodes into smaller sizes
-    NodeListList = chunkify(NodeList),
-    % now write out each chunk and return the KeyPointer pairs for those nodes
-    ResultList = [
-        begin
-            {ok, Pointer, Size} = couch_file:append_term(
-                Fd, {NodeType, ANodeList}, [{compression, Comp}]),
-            {LastKey, _} = lists:last(ANodeList),
-            SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
-            {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
-        end
-    ||
-        ANodeList <- NodeListList
-    ],
-    {ok, ResultList}.
-
-
-write_node(Bt, _OldNode, NodeType, [], NewList) ->
-    write_node(Bt, NodeType, NewList);
-write_node(Bt, _OldNode, NodeType, [_], NewList) ->
-    write_node(Bt, NodeType, NewList);
-write_node(Bt, OldNode, NodeType, OldList, NewList) ->
-    case can_reuse_old_node(OldList, NewList) of
-        {true, Prefix, Suffix} ->
-            {ok, PrefixKVs} = case Prefix of
-                [] -> {ok, []};
-                _ -> write_node(Bt, NodeType, Prefix)
-            end,
-            {ok, SuffixKVs} = case Suffix of
-                [] -> {ok, []};
-                _ -> write_node(Bt, NodeType, Suffix)
-            end,
-            Result = PrefixKVs ++ [OldNode] ++ SuffixKVs,
-            {ok, Result};
-        false ->
-            write_node(Bt, NodeType, NewList)
-    end.
-
-can_reuse_old_node(OldList, NewList) ->
-    {Prefix, RestNewList} = remove_prefix_kvs(hd(OldList), NewList),
-    case old_list_is_prefix(OldList, RestNewList, 0) of
-        {true, Size, Suffix} ->
-            ReuseThreshold = get_chunk_size() * ?FILL_RATIO,
-            if Size < ReuseThreshold -> false; true ->
-                {true, Prefix, Suffix}
-            end;
-        false ->
-            false
-    end.
-
-remove_prefix_kvs(KV1, [KV2 | Rest]) when KV2 < KV1 ->
-    {Prefix, RestNewList} = remove_prefix_kvs(KV1, Rest),
-    {[KV2 | Prefix], RestNewList};
-remove_prefix_kvs(_, RestNewList) ->
-    {[], RestNewList}.
-
-% No more KVs in the old node so it's a prefix
-old_list_is_prefix([], Suffix, Size) ->
-    {true, Size, Suffix};
-% Some KVs have been removed from the old node
-old_list_is_prefix(_OldList, [], _Size) ->
-    false;
-% KV is equal in both old and new node so continue
-old_list_is_prefix([KV | Rest1], [KV | Rest2], Acc) ->
-    old_list_is_prefix(Rest1, Rest2, ?term_size(KV) + Acc);
-% KV mismatch between old and new node so not a prefix
-old_list_is_prefix(_OldList, _NewList, _Acc) ->
-    false.
-
-modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
-    modify_node(Bt, nil, Actions, QueryOutput);
-modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            tuple_size(NodeTuple), [])), QueryOutput};
-modify_kpnode(Bt, NodeTuple, LowerBound,
-        [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
-    Sz = tuple_size(NodeTuple),
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
-    case N =:= Sz of
-    true  ->
-        % perform remaining actions on last node
-        {_, PointerInfo} = element(Sz, NodeTuple),
-        {ok, ChildKPs, QueryOutput2} =
-            modify_node(Bt, PointerInfo, Actions, QueryOutput),
-        NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            Sz - 1, ChildKPs)),
-        {ok, NodeList, QueryOutput2};
-    false ->
-        {NodeKey, PointerInfo} = element(N, NodeTuple),
-        SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
-                not less(Bt, NodeKey, ActionKey)
-            end,
-        {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
-        {ok, ChildKPs, QueryOutput2} =
-                modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
-        ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
-                LowerBound, N - 1, ResultNode)),
-        modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
-    end.
-
-bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
-    Tail;
-bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-
-bounded_tuple_to_list(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
-bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
-    lists:reverse(Acc, Tail);
-bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
-
-find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
-    End;
-find_first_gteq(Bt, Tuple, Start, End, Key) ->
-    Mid = Start + ((End - Start) div 2),
-    {TupleKey, _} = element(Mid, Tuple),
-    case less(Bt, TupleKey, Key) of
-    true ->
-        find_first_gteq(Bt, Tuple, Mid+1, End, Key);
-    false ->
-        find_first_gteq(Bt, Tuple, Start, Mid, Key)
-    end.
-
-modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
-    case ActionType of
-    insert ->
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-    remove ->
-        % just drop the action
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
-    fetch ->
-        % the key/value must not exist in the tree
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-    end;
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
-    {Key, Value} = element(N, NodeTuple),
-    ResultNode =  bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
-    case less(Bt, ActionKey, Key) of
-    true ->
-        case ActionType of
-        insert ->
-            % ActionKey is less than the Key, so insert
-            modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-        remove ->
-            % ActionKey is less than the Key, just drop the action
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
-        fetch ->
-            % ActionKey is less than the Key, the key/value must not exist in the tree
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-        end;
-    false ->
-        % ActionKey and Key are maybe equal.
-        case less(Bt, Key, ActionKey) of
-        false ->
-            case ActionType of
-            insert ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-            remove ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
-            fetch ->
-                % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
-                % since an identical action key can follow it.
-                modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
-            end;
-        true ->
-            modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
-        end
-    end.
-
-
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    P = element(1, Node),
-    case get_node(Bt, P) of
-    {kp_node, NodeList} ->
-        NodeList2 = adjust_dir(Dir, NodeList),
-        reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
-    {kv_node, KVs} ->
-        KVs2 = adjust_dir(Dir, KVs),
-        reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
-    end.
-
-reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-
-    GTEKeyStartKVs =
-    case KeyStart of
-    undefined ->
-        KVs;
-    _ ->
-        DropFun = case Dir of
-        fwd ->
-            fun({Key, _}) -> less(Bt, Key, KeyStart) end;
-        rev ->
-            fun({Key, _}) -> less(Bt, KeyStart, Key) end
-        end,
-        lists:dropwhile(DropFun, KVs)
-    end,
-    KVs2 = lists:takewhile(
-        fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
-    reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-        _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    case GroupedKey of
-    undefined ->
-        reduce_stream_kv_node2(Bt, RestKVs, Key,
-                [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
-    _ ->
-
-        case KeyGroupFun(GroupedKey, Key) of
-        true ->
-            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
-                [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
-                Fun, Acc);
-        false ->
-            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
-            {ok, Acc2} ->
-                reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
-                    [], KeyGroupFun, Fun, Acc2);
-            {stop, Acc2} ->
-                throw({stop, Acc2})
-            end
-        end
-    end.
-
-reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-    Nodes =
-    case KeyStart of
-    undefined ->
-        NodeList;
-    _ ->
-        case Dir of
-        fwd ->
-            lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
-        rev ->
-            RevKPs = lists:reverse(NodeList),
-            case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
-            {_Before, []} ->
-                NodeList;
-            {Before, [FirstAfter | _]} ->
-                [FirstAfter | lists:reverse(Before)]
-            end
-        end
-    end,
-    {InRange, MaybeInRange} = lists:splitwith(
-        fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
-    NodesInRange = case MaybeInRange of
-    [FirstMaybeInRange | _] when Dir =:= fwd ->
-        InRange ++ [FirstMaybeInRange];
-    _ ->
-        InRange
-    end,
-    reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
-                        undefined, [], [], KeyGroupFun, Fun, Acc) ->
-    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
-                [], [], KeyGroupFun, Fun, Acc),
-    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
-            GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
-        KeyGroupFun(GroupedKey, Key) end, NodeList),
-    {GroupedNodes, UngroupedNodes} =
-    case Grouped0 of
-    [] ->
-        {Grouped0, Ungrouped0};
-    _ ->
-        [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
-        {RestGrouped, [FirstGrouped | Ungrouped0]}
-    end,
-    GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
-    case UngroupedNodes of
-    [{_Key, NodeInfo}|RestNodes] ->
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
-        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
-                GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-    [] ->
-        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
-    end.
-
-adjust_dir(fwd, List) ->
-    List;
-adjust_dir(rev, List) ->
-    lists:reverse(List).
-
-stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
-    end.
-
-stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
-    end.
-
-stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
-    Red = element(2, Node),
-    case Fun(traverse, Key, Red, Acc) of
-    {ok, Acc2} ->
-        case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
-        {ok, Acc3} ->
-            stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
-        {stop, LastReds, Acc3} ->
-            {stop, LastReds, Acc3}
-        end;
-    {skip, Acc2} ->
-        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
-    {stop, Acc2} ->
-        {stop, Reds, Acc2}
-    end.
-
-drop_nodes(_Bt, Reds, _StartKey, []) ->
-    {Reds, []};
-drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
-    case less(Bt, NodeKey, StartKey) of
-    true ->
-        drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
-    false ->
-        {Reds, [{NodeKey, Node} | RestKPs]}
-    end.
-
-stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
-    {NewReds, NodesToStream} =
-    case Dir of
-    fwd ->
-        % drop all nodes sorting before the key
-        drop_nodes(Bt, Reds, StartKey, KPs);
-    rev ->
-        % keep all nodes sorting before the key, AND the first node to sort after
-        RevKPs = lists:reverse(KPs),
-         case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
-        {_RevsBefore, []} ->
-            % everything sorts before it
-            {Reds, KPs};
-        {RevBefore, [FirstAfter | Drop]} ->
-            {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
-                 [FirstAfter | lists:reverse(RevBefore)]}
-        end
-    end,
-    case NodesToStream of
-    [] ->
-        {ok, Acc};
-    [{_Key, Node} | Rest] ->
-        case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
-        {ok, Acc2} ->
-            Red = element(2, Node),
-            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
-        {stop, LastReds, Acc2} ->
-            {stop, LastReds, Acc2}
-        end
-    end.
-
-stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
-    DropFun =
-    case Dir of
-    fwd ->
-        fun({Key, _}) -> less(Bt, Key, StartKey) end;
-    rev ->
-        fun({Key, _}) -> less(Bt, StartKey, Key) end
-    end,
-    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
-    AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
-    stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
-
-stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
-    case InRange(K) of
-    false ->
-        {stop, {PrevKVs, Reds}, Acc};
-    true ->
-        AssembledKV = assemble(Bt, K, V),
-        case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
-        {ok, Acc2} ->
-            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
-        {stop, Acc2} ->
-            {stop, {PrevKVs, Reds}, Acc2}
-        end
-    end.
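One piece of the removed couch_btree module worth calling out is chunkify/1,
which splits a node's KV list into chunks whose serialized size stays near a
threshold before each chunk is appended to the file. A rough, self-contained
sketch of that splitting step follows (toy_chunkify and chunkify/2 are
hypothetical names; the real code derives the threshold from the
btree_chunk_size config and has an extra clause that folds a single trailing
element into the previous chunk):

    -module(toy_chunkify).
    -export([chunkify/2]).

    %% Split List into chunks whose external_size stays near Threshold. An
    %% element that pushes the running size over the threshold closes out the
    %% current chunk, as in chunkify/5 above.
    chunkify(List, Threshold) ->
        chunkify(List, Threshold, [], 0, []).

    chunkify([], _Threshold, [], 0, Chunks) ->
        lists:reverse(Chunks);
    chunkify([], _Threshold, Cur, _Sz, Chunks) ->
        lists:reverse([lists:reverse(Cur) | Chunks]);
    chunkify([Item | Rest], Threshold, Cur, Sz, Chunks) ->
        ItemSz = erlang:external_size(Item),
        case Sz + ItemSz > Threshold andalso Cur =/= [] of
            true ->
                chunkify(Rest, Threshold, [], 0,
                         [lists:reverse([Item | Cur]) | Chunks]);
            false ->
                chunkify(Rest, Threshold, [Item | Cur], Sz + ItemSz, Chunks)
        end.

For example, toy_chunkify:chunkify([{K, binary:copy(<<"x">>, 400)} || K <-
lists:seq(1, 10)], 1279) splits the ten KVs into a few chunks, with each
chunk's serialized size hovering around the 1279-byte default mentioned above.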
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
deleted file mode 100644
index 6e9294a..0000000
--- a/src/couch/src/couch_changes.erl
+++ /dev/null
@@ -1,724 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes).
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([
-    handle_db_changes/3,
-    get_changes_timeout/2,
-    wait_updated/3,
-    get_rest_updated/1,
-    configure_filter/4,
-    filter/3,
-    handle_db_event/3,
-    handle_view_event/3,
-    send_changes_doc_ids/6,
-    send_changes_design_docs/6
-]).
-
--export([changes_enumerator/2]).
-
-%% export so we can use fully qualified call to facilitate hot-code upgrade
--export([
-    keep_sending_changes/3
-]).
-
--record(changes_acc, {
-    db,
-    seq,
-    prepend,
-    filter,
-    callback,
-    user_acc,
-    resp_type,
-    limit,
-    include_docs,
-    doc_options,
-    conflicts,
-    timeout,
-    timeout_fun,
-    aggregation_kvs,
-    aggregation_results
-}).
-
-handle_db_changes(Args0, Req, Db0) ->
-    #changes_args{
-        style = Style,
-        filter = FilterName,
-        feed = Feed,
-        dir = Dir,
-        since = Since
-    } = Args0,
-    Filter = configure_filter(FilterName, Style, Req, Db0),
-    Args = Args0#changes_args{filter_fun = Filter},
-    DbName = couch_db:name(Db0),
-    StartListenerFun = fun() ->
-        couch_event:link_listener(
-            ?MODULE, handle_db_event, self(), [{dbname, DbName}]
-        )
-    end,
-    Start = fun() ->
-        {ok, Db} = couch_db:reopen(Db0),
-        StartSeq = case Dir of
-        rev ->
-            couch_db:get_update_seq(Db);
-        fwd ->
-            Since
-        end,
-        {Db, StartSeq}
-    end,
-    % begin timer to deal with heartbeat when filter function fails
-    case Args#changes_args.heartbeat of
-    undefined ->
-        erlang:erase(last_changes_heartbeat);
-    Val when is_integer(Val); Val =:= true ->
-        put(last_changes_heartbeat, os:timestamp())
-    end,
-
-    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
-    true ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            {ok, Listener} = StartListenerFun(),
-
-            {Db, StartSeq} = Start(),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
-                             <<"">>, Timeout, TimeoutFun),
-            try
-                keep_sending_changes(
-                    Args#changes_args{dir=fwd},
-                    Acc0,
-                    true)
-            after
-                couch_event:stop_listener(Listener),
-                get_rest_updated(ok) % clean out any remaining update messages
-            end
-        end;
-    false ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            {Db, StartSeq} = Start(),
-            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
-                             UserAcc2, Db, StartSeq, <<>>,
-                             Timeout, TimeoutFun),
-            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
-                send_changes(
-                    Acc0,
-                    Dir,
-                    true),
-            end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
-        end
-    end.
-
-
-handle_db_event(_DbName, updated, Parent) ->
-    Parent ! updated,
-    {ok, Parent};
-handle_db_event(_DbName, deleted, Parent) ->
-    Parent ! deleted,
-    {ok, Parent};
-handle_db_event(_DbName, _Event, Parent) ->
-    {ok, Parent}.
-
-
-handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
-    case Msg of
-        {index_commit, DDocId} ->
-            Parent ! updated;
-        {index_delete, DDocId} ->
-            Parent ! deleted;
-        _ ->
-            ok
-    end,
-    {ok, {Parent, DDocId}}.
-
-get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
-    Pair;
-get_callback_acc(Callback) when is_function(Callback, 2) ->
-    {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
-
-configure_filter("_doc_ids", Style, Req, _Db) ->
-    {doc_ids, Style, get_doc_ids(Req)};
-configure_filter("_selector", Style, Req, _Db) ->
-    {selector, Style,  get_selector_and_fields(Req)};
-configure_filter("_design", Style, _Req, _Db) ->
-    {design_docs, Style};
-configure_filter("_view", Style, Req, Db) ->
-    ViewName = get_view_qs(Req),
-    if ViewName /= "" -> ok; true ->
-        throw({bad_request, "`view` filter parameter is not provided."})
-    end,
-    ViewNameParts = string:tokens(ViewName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
-        [DName, VName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"views">>, VName]),
-            case couch_db:is_clustered(Db) of
-                true ->
-                    DIR = fabric_util:doc_id_and_rev(DDoc),
-                    {fetch, view, Style, DIR, VName};
-                false ->
-                    {view, Style, DDoc, VName}
-            end;
-        [] ->
-            Msg = "`view` must be of the form `designname/viewname`",
-            throw({bad_request, Msg})
-    end;
-configure_filter([$_ | _], _Style, _Req, _Db) ->
-    throw({bad_request, "unknown builtin filter name"});
-configure_filter("", main_only, _Req, _Db) ->
-    {default, main_only};
-configure_filter("", all_docs, _Req, _Db) ->
-    {default, all_docs};
-configure_filter(FilterName, Style, Req, Db) ->
-    FilterNameParts = string:tokens(FilterName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
-        [DName, FName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"filters">>, FName]),
-            case couch_db:is_clustered(Db) of
-                true ->
-                    DIR = fabric_util:doc_id_and_rev(DDoc),
-                    {fetch, custom, Style, Req, DIR, FName};
-                false ->
-                    {custom, Style, Req, DDoc, FName}
-            end;
-
-        [] ->
-            {default, Style};
-        _Else ->
-            Msg = "`filter` must be of the form `designname/filtername`",
-            throw({bad_request, Msg})
-    end.
-
-
-filter(Db, #full_doc_info{}=FDI, Filter) ->
-    filter(Db, couch_doc:to_doc_info(FDI), Filter);
-filter(_Db, DocInfo, {default, Style}) ->
-    apply_style(DocInfo, Style);
-filter(_Db, DocInfo, {doc_ids, Style, DocIds}) ->
-    case lists:member(DocInfo#doc_info.id, DocIds) of
-        true ->
-            apply_style(DocInfo, Style);
-        false ->
-            []
-    end;
-filter(Db, DocInfo, {selector, Style, {Selector, _Fields}}) ->
-    Docs = open_revs(Db, DocInfo, Style),
-    Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
-        || Doc <- Docs],
-    filter_revs(Passes, Docs);
-filter(_Db, DocInfo, {design_docs, Style}) ->
-    case DocInfo#doc_info.id of
-        <<"_design", _/binary>> ->
-            apply_style(DocInfo, Style);
-        _ ->
-            []
-    end;
-filter(Db, DocInfo, {view, Style, DDoc, VName}) ->
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
-    filter_revs(Passes, Docs);
-filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) ->
-    Req = case Req0 of
-        {json_req, _} -> Req0;
-        #httpd{} -> {json_req, couch_httpd_external:json_req_obj(Req0, Db)}
-    end,
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
-    filter_revs(Passes, Docs).
-
-
-get_view_qs({json_req, {Props}}) ->
-    {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
-    binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
-get_view_qs(Req) ->
-    couch_httpd:qs_value(Req, "view", "").
-
-get_doc_ids({json_req, {Props}}) ->
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {Props} = couch_httpd:json_body_obj(Req),
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='GET'}=Req) ->
-    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
-    check_docids(DocIds);
-get_doc_ids(_) ->
-    throw({bad_request, no_doc_ids_provided}).
-
-
-get_selector_and_fields({json_req, {Props}}) ->
-    Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
-    Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
-    {Selector, Fields};
-get_selector_and_fields(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    get_selector_and_fields({json_req,  couch_httpd:json_body_obj(Req)});
-get_selector_and_fields(_) ->
-    throw({bad_request, "Selector must be specified in POST payload"}).
-
-
-check_docids(DocIds) when is_list(DocIds) ->
-    lists:foreach(fun
-        (DocId) when not is_binary(DocId) ->
-            Msg = "`doc_ids` filter parameter is not a list of doc ids.",
-            throw({bad_request, Msg});
-        (_) -> ok
-    end, DocIds),
-    DocIds;
-check_docids(_) ->
-    Msg = "`doc_ids` filter parameter is not a list of doc ids.",
-    throw({bad_request, Msg}).
-
-
-check_selector(Selector={_}) ->
-    try
-        mango_selector:normalize(Selector)
-    catch
-        {mango_error, Mod, Reason0} ->
-            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
-            throw({bad_request, Reason})
-    end;
-check_selector(_Selector) ->
-    throw({bad_request, "Selector error: expected a JSON object"}).
-
-
-check_fields(nil) ->
-    nil;
-check_fields(Fields) when is_list(Fields) ->
-    try
-        {ok, Fields1} = mango_fields:new(Fields),
-        Fields1
-    catch
-        {mango_error, Mod, Reason0} ->
-            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
-            throw({bad_request, Reason})
-    end;
-check_fields(_Fields) ->
-    throw({bad_request, "Selector error: fields must be JSON array"}).
-
-
-open_ddoc(Db, DDocId) ->
-    DbName = couch_db:name(Db),
-    case couch_db:is_clustered(Db) of
-        true ->
-            case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
-                {ok, _} = Resp -> Resp;
-                Else -> throw(Else)
-            end;
-        false ->
-            case couch_db:open_doc(Db, DDocId, [ejson_body]) of
-                {ok, _} = Resp -> Resp;
-                Else -> throw(Else)
-            end
-    end.
-
-
-check_member_exists(#doc{body={Props}}, Path) ->
-    couch_util:get_nested_json_value({Props}, Path).
-
-
-apply_style(#doc_info{revs=Revs}, main_only) ->
-    [#rev_info{rev=Rev} | _] = Revs,
-    [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-apply_style(#doc_info{revs=Revs}, all_docs) ->
-    [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev=R} <- Revs].
-
-
-open_revs(Db, DocInfo, Style) ->
-    DocInfos = case Style of
-        main_only -> [DocInfo];
-        all_docs -> [DocInfo#doc_info{revs=[R]}|| R <- DocInfo#doc_info.revs]
-    end,
-    OpenOpts = [deleted, conflicts],
-    % Relying on list comprehensions to silence errors
-    OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos],
-    [Doc || {ok, Doc} <- OpenResults].
-
-
-filter_revs(Passes, Docs) ->
-    lists:flatmap(fun
-        ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
-            RevStr = couch_doc:rev_to_str({RevPos, RevId}),
-            Change = {[{<<"rev">>, RevStr}]},
-            [Change];
-        (_) ->
-            []
-    end, lists:zip(Passes, Docs)).
-
-
-get_changes_timeout(Args, Callback) ->
-    #changes_args{
-        heartbeat = Heartbeat,
-        timeout = Timeout,
-        feed = ResponseType
-    } = Args,
-    DefaultTimeout = list_to_integer(
-        config:get("httpd", "changes_timeout", "60000")
-    ),
-    case Heartbeat of
-    undefined ->
-        case Timeout of
-        undefined ->
-            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
-        infinity ->
-            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
-        _ ->
-            {lists:min([DefaultTimeout, Timeout]),
-                fun(UserAcc) -> {stop, UserAcc} end}
-        end;
-    true ->
-        {DefaultTimeout,
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
-    _ ->
-        {lists:min([DefaultTimeout, Heartbeat]),
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
-    end.
-
-start_sending_changes(_Callback, UserAcc, ResponseType)
-        when ResponseType =:= "continuous"
-        orelse ResponseType =:= "eventsource" ->
-    UserAcc;
-start_sending_changes(Callback, UserAcc, ResponseType) ->
-    Callback(start, ResponseType, UserAcc).
-
-build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
-    #changes_args{
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        limit = Limit,
-        feed = ResponseType,
-        filter_fun = Filter
-    } = Args,
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        prepend = Prepend,
-        filter = Filter,
-        callback = Callback,
-        user_acc = UserAcc,
-        resp_type = ResponseType,
-        limit = Limit,
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        timeout = Timeout,
-        timeout_fun = TimeoutFun,
-        aggregation_results=[],
-        aggregation_kvs=[]
-    }.
-
-send_changes(Acc, Dir, FirstRound) ->
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        filter = Filter
-    } = maybe_upgrade_changes_acc(Acc),
-    DbEnumFun = fun changes_enumerator/2,
-    case can_optimize(FirstRound, Filter) of
-        {true, Fun} ->
-            Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
-        _ ->
-            Opts = [{dir, Dir}],
-            couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
-    end.
-
-
-can_optimize(true, {doc_ids, _Style, DocIds}) ->
-    MaxDocIds = config:get_integer("couchdb",
-        "changes_doc_ids_optimization_threshold", 100),
-    if length(DocIds) =< MaxDocIds ->
-        {true, fun send_changes_doc_ids/6};
-    true ->
-        false
-    end;
-can_optimize(true, {design_docs, _Style}) ->
-    {true, fun send_changes_design_docs/6};
-can_optimize(_, _) ->
-    false.
-
-
-send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
-    Results = couch_db:get_full_doc_infos(Db, DocIds),
-    FullInfos = lists:foldl(fun
-        (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
-        (not_found, Acc) -> Acc
-    end, [], Results),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
-    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
-    Opts = [
-        include_deleted,
-        {start_key, <<"_design/">>},
-        {end_key_gt, <<"_design0">>}
-    ],
-    {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
-    FoldFun = case Dir of
-        fwd -> fun lists:foldl/3;
-        rev -> fun lists:foldr/3
-    end,
-    GreaterFun = case Dir of
-        fwd -> fun(A, B) -> A > B end;
-        rev -> fun(A, B) -> A =< B end
-    end,
-    DocInfos = lists:foldl(fun(FDI, Acc) ->
-        DI = couch_doc:to_doc_info(FDI),
-        case GreaterFun(DI#doc_info.high_seq, StartSeq) of
-            true -> [DI | Acc];
-            false -> Acc
-        end
-    end, [], FullDocInfos),
-    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
-    FinalAcc = try
-        FoldFun(fun(DocInfo, Acc) ->
-            case Fun(DocInfo, Acc) of
-                {ok, NewAcc} ->
-                    NewAcc;
-                {stop, NewAcc} ->
-                    throw({stop, NewAcc})
-            end
-        end, Acc0, SortedDocInfos)
-    catch
-        {stop, Acc} -> Acc
-    end,
-    case Dir of
-        fwd ->
-            FinalAcc0 = case element(1, FinalAcc) of
-                changes_acc -> % we came here via couch_http or internal call
-                    FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)};
-                fabric_changes_acc -> % we came here via chttpd / fabric / rexi
-                    FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
-            end,
-            {ok, FinalAcc0};
-        rev -> {ok, FinalAcc}
-    end.
-
-
-keep_sending_changes(Args, Acc0, FirstRound) ->
-    #changes_args{
-        feed = ResponseType,
-        limit = Limit,
-        db_open_options = DbOptions
-    } = Args,
-
-    {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
-
-    #changes_acc{
-        db = Db, callback = Callback,
-        timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
-        prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
-    } = maybe_upgrade_changes_acc(ChangesAcc),
-
-    couch_db:close(Db),
-    if Limit > NewLimit, ResponseType == "longpoll" ->
-        end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
-    true ->
-        case wait_updated(Timeout, TimeoutFun, UserAcc2) of
-        {updated, UserAcc4} ->
-            DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
-            case couch_db:open(couch_db:name(Db), DbOptions1) of
-            {ok, Db2} ->
-                ?MODULE:keep_sending_changes(
-                  Args#changes_args{limit=NewLimit},
-                  ChangesAcc#changes_acc{
-                    db = Db2,
-                    user_acc = UserAcc4,
-                    seq = EndSeq,
-                    prepend = Prepend2,
-                    timeout = Timeout,
-                    timeout_fun = TimeoutFun},
-                  false);
-            _Else ->
-                end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
-            end;
-        {stop, UserAcc4} ->
-            end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
-        end
-    end.
-
-end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
-    Callback({stop, EndSeq}, ResponseType, UserAcc).
-
-changes_enumerator(Value, Acc) ->
-    #changes_acc{
-        filter = Filter, callback = Callback, prepend = Prepend,
-        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
-        timeout = Timeout, timeout_fun = TimeoutFun
-    } = maybe_upgrade_changes_acc(Acc),
-    Results0 = filter(Db, Value, Filter),
-    Results = [Result || Result <- Results0, Result /= null],
-    Seq = case Value of
-        #full_doc_info{} ->
-            Value#full_doc_info.update_seq;
-        #doc_info{} ->
-            Value#doc_info.high_seq
-    end,
-    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
-    case Results of
-    [] ->
-        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
-        case Done of
-        stop ->
-            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
-        ok ->
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
-        end;
-    _ ->
-        if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
-            ChangesRow = changes_row(Results, Value, Acc),
-            UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
-            reset_heartbeat(),
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}};
-        true ->
-            ChangesRow = changes_row(Results, Value, Acc),
-            UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
-            reset_heartbeat(),
-            {Go, Acc#changes_acc{
-                seq = Seq, prepend = <<",\n">>,
-                user_acc = UserAcc2, limit = Limit - 1}}
-        end
-    end.
-
-
-
-changes_row(Results, #full_doc_info{} = FDI, Acc) ->
-    changes_row(Results, couch_doc:to_doc_info(FDI), Acc);
-changes_row(Results, DocInfo, Acc0) ->
-    Acc = maybe_upgrade_changes_acc(Acc0),
-    #doc_info{
-        id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
-    } = DocInfo,
-    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
-        deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)}.
-
-maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
-    #changes_acc{
-        db = Db,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        filter = Filter
-    } = Acc,
-    Opts = case Conflicts of
-               true -> [deleted, conflicts];
-               false -> [deleted]
-           end,
-    load_doc(Db, Value, Opts, DocOpts, Filter);
-
-maybe_get_changes_doc(_Value, _Acc) ->
-    [].
-
-
-load_doc(Db, Value, Opts, DocOpts, Filter) ->
-    case couch_index_util:load_doc(Db, Value, Opts) of
-        null ->
-            [{doc, null}];
-        Doc ->
-            [{doc, doc_to_json(Doc, DocOpts, Filter)}]
-    end.
-
-
-doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
-    when Fields =/= nil ->
-    mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
-doc_to_json(Doc, DocOpts, _Filter) ->
-    couch_doc:to_json_obj(Doc, DocOpts).
-
-
-deleted_item(true) -> [{<<"deleted">>, true}];
-deleted_item(_) -> [].
-
-% waits for an updated msg; if there are multiple msgs, collects them.
-wait_updated(Timeout, TimeoutFun, UserAcc) ->
-    receive
-    updated ->
-        get_rest_updated(UserAcc);
-    deleted ->
-        {stop, UserAcc}
-    after Timeout ->
-        {Go, UserAcc2} = TimeoutFun(UserAcc),
-        case Go of
-        ok ->
-            ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
-        stop ->
-            {stop, UserAcc2}
-        end
-    end.
-
-get_rest_updated(UserAcc) ->
-    receive
-    updated ->
-        get_rest_updated(UserAcc)
-    after 0 ->
-        {updated, UserAcc}
-    end.
-
-reset_heartbeat() ->
-    case get(last_changes_heartbeat) of
-    undefined ->
-        ok;
-    _ ->
-        put(last_changes_heartbeat, os:timestamp())
-    end.
-
-maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
-    Before = get(last_changes_heartbeat),
-    case Before of
-    undefined ->
-        {ok, Acc};
-    _ ->
-        Now = os:timestamp(),
-        case timer:now_diff(Now, Before) div 1000 >= Timeout of
-        true ->
-            Acc2 = TimeoutFun(Acc),
-            put(last_changes_heartbeat, Now),
-            Acc2;
-        false ->
-            {ok, Acc}
-        end
-    end.
-
-
-maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
-    Acc;
-maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
-    #changes_acc{
-        db = element(2, Acc),
-        seq = element(6, Acc),
-        prepend = element(7, Acc),
-        filter = element(8, Acc),
-        callback = element(9, Acc),
-        user_acc = element(10, Acc),
-        resp_type = element(11, Acc),
-        limit = element(12, Acc),
-        include_docs = element(13, Acc),
-        doc_options = element(14, Acc),
-        conflicts = element(15, Acc),
-        timeout = element(16, Acc),
-        timeout_fun = element(17, Acc),
-        aggregation_kvs = element(18, Acc),
-        aggregation_results = element(19, Acc)
-    }.
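The Callback threaded through start_sending_changes/3, changes_enumerator/2 and
end_sending_changes/4 above is invoked with start, {change, Row, Prepend}, timeout
and {stop, EndSeq}, and each call returns the next user accumulator. A minimal
sketch of such a callback (a hypothetical collector, not part of this module):

    %% Hypothetical callback matching the calls made by the code above.
    collect_changes(start, _ResponseType, Acc) ->
        Acc;                              % preamble, nothing to record
    collect_changes({change, Row, _Prepend}, _ResponseType, Acc) ->
        [Row | Acc];                      % one EJSON row per change
    collect_changes(timeout, _ResponseType, Acc) ->
        Acc;                              % heartbeat fired, keep waiting
    collect_changes({stop, _EndSeq}, _ResponseType, Acc) ->
        lists:reverse(Acc).               % feed finished, rows in order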
diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl
deleted file mode 100644
index cfcc2a4..0000000
--- a/src/couch/src/couch_compress.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compress).
-
--export([compress/2, decompress/1, is_compressed/2]).
--export([get_compression_method/0]).
--export([uncompressed_size/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% binaries compressed with snappy have their first byte set to this value
--define(SNAPPY_PREFIX, 1).
-% Term prefixes documented at:
-%      http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
--define(TERM_PREFIX, 131).
--define(COMPRESSED_TERM_PREFIX, 131, 80).
-
-
-get_compression_method() ->
-    case config:get("couchdb", "file_compression") of
-    undefined ->
-        ?DEFAULT_COMPRESSION;
-    Method1 ->
-        case string:tokens(Method1, "_") of
-        [Method] ->
-            list_to_existing_atom(Method);
-        [Method, Level] ->
-            {list_to_existing_atom(Method), list_to_integer(Level)}
-        end
-    end.
-
-
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
-    Bin;
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(<<?COMPRESSED_TERM_PREFIX, _/binary>> = Bin, {deflate, _Level}) ->
-    Bin;
-compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(Term, none) ->
-    ?term_to_bin(Term);
-compress(Term, {deflate, Level}) ->
-    term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
-compress(Term, snappy) ->
-    Bin = ?term_to_bin(Term),
-    try
-        {ok, CompressedBin} = snappy:compress(Bin),
-        <<?SNAPPY_PREFIX, CompressedBin/binary>>
-    catch exit:snappy_nif_not_loaded ->
-        Bin
-    end.
-
-
-decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, TermBin} = snappy:decompress(Rest),
-    binary_to_term(TermBin);
-decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    binary_to_term(Bin);
-decompress(_) ->
-    error(invalid_compression).
-
-
-is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
-    Method =:= snappy;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
-    true;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
-    false;
-is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
-    Method =:= none;
-is_compressed(Term, _Method) when not is_binary(Term) ->
-    false;
-is_compressed(_, _) ->
-    error(invalid_compression).
-
-
-uncompressed_size(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, Size} = snappy:uncompressed_length(Rest),
-    Size;
-uncompressed_size(<<?COMPRESSED_TERM_PREFIX, Size:32, _/binary>> = _Bin) ->
-    % See http://erlang.org/doc/apps/erts/erl_ext_dist.html
-    % The uncompressed binary would be encoded with <<131, Rest/binary>>
-    % so need to add 1 for 131
-    Size + 1;
-uncompressed_size(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    byte_size(Bin);
-uncompressed_size(_) ->
-    error(invalid_compression).
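As a usage sketch for the API above (hedged: it assumes the snappy NIF is loaded;
otherwise compress/2 falls back to a plain term_to_binary and the is_compressed
check below would not hold):

    %% Round-trip a term through the snappy-tagged representation.
    Term = {[{<<"a">>, 1}]},
    Bin = couch_compress:compress(Term, snappy),      % <<1, _/binary>>, see ?SNAPPY_PREFIX
    true = couch_compress:is_compressed(Bin, snappy),
    Term = couch_compress:decompress(Bin),            % matches the original term
    Size = couch_compress:uncompressed_size(Bin).     % size of the uncompressed encoding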
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
deleted file mode 100644
index 6587205..0000000
--- a/src/couch/src/couch_db.erl
+++ /dev/null
@@ -1,2086 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db).
-
--export([
-    create/2,
-    open/2,
-    open_int/2,
-    incref/1,
-    reopen/1,
-    close/1,
-
-    clustered_db/2,
-    clustered_db/3,
-
-    monitor/1,
-    monitored_by/1,
-    is_idle/1,
-
-    is_admin/1,
-    check_is_admin/1,
-    check_is_member/1,
-
-    name/1,
-    get_after_doc_read_fun/1,
-    get_before_doc_update_fun/1,
-    get_committed_update_seq/1,
-    get_compacted_seq/1,
-    get_compactor_pid/1,
-    get_compactor_pid_sync/1,
-    get_db_info/1,
-    get_partition_info/2,
-    get_del_doc_count/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_filepath/1,
-    get_instance_start_time/1,
-    get_pid/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_update_seq/1,
-    get_user_ctx/1,
-    get_uuid/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-
-    is_db/1,
-    is_system_db/1,
-    is_clustered/1,
-    is_system_db_name/1,
-    is_partitioned/1,
-
-    set_revs_limit/2,
-    set_purge_infos_limit/2,
-    set_security/2,
-    set_user_ctx/2,
-
-    load_validation_funs/1,
-    reload_validation_funs/1,
-
-    open_doc/2,
-    open_doc/3,
-    open_doc_revs/4,
-    open_doc_int/3,
-    get_doc_info/2,
-    get_full_doc_info/2,
-    get_full_doc_infos/2,
-    get_missing_revs/2,
-    get_design_doc/2,
-    get_design_docs/1,
-    get_design_doc_count/1,
-    get_purge_infos/2,
-
-    get_minimum_purge_seq/1,
-    purge_client_exists/3,
-
-    validate_docid/2,
-    doc_from_json_obj_validate/2,
-
-    update_doc/3,
-    update_doc/4,
-    update_docs/4,
-    update_docs/2,
-    update_docs/3,
-    delete_doc/3,
-
-    purge_docs/2,
-    purge_docs/3,
-
-    with_stream/3,
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/3,
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_design_docs/4,
-    fold_changes/4,
-    fold_changes/5,
-    count_changes_since/2,
-    fold_purge_infos/4,
-    fold_purge_infos/5,
-
-    calculate_start_seq/3,
-    owner_of/2,
-
-    start_compact/1,
-    cancel_compact/1,
-    wait_for_compaction/1,
-    wait_for_compaction/2,
-
-    dbname_suffix/1,
-    normalize_dbname/1,
-    validate_dbname/1,
-
-    make_doc/5,
-    new_revid/1
-]).
-
-
--export([
-    start_link/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(DBNAME_REGEX,
-    "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
-    "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
-).
-
-start_link(Engine, DbName, Filepath, Options) ->
-    Arg = {Engine, DbName, Filepath, Options},
-    proc_lib:start_link(couch_db_updater, init, [Arg]).
-
-create(DbName, Options) ->
-    couch_server:create(DbName, Options).
-
-% this is for opening a database for internal purposes like the replicator
-% or the view indexer. it never throws a reader error.
-open_int(DbName, Options) ->
-    couch_server:open(DbName, Options).
-
-% this should be called anytime an http request opens the database.
-% it ensures that the http userCtx is a valid reader
-open(DbName, Options) ->
-    case couch_server:open(DbName, Options) of
-        {ok, Db} ->
-            try
-                check_is_member(Db),
-                {ok, Db}
-            catch
-                throw:Error ->
-                    close(Db),
-                    throw(Error)
-            end;
-        Else -> Else
-    end.
-
-
-reopen(#db{} = Db) ->
-    % We could have just swapped out the storage engine
-    % for this database during a compaction so we just
-    % reimplement this as a close/open pair now.
-    try
-        open(Db#db.name, [{user_ctx, Db#db.user_ctx} | Db#db.options])
-    after
-        close(Db)
-    end.
-
-
-% You shouldn't call this. It's part of the ref counting between
-% couch_server and couch_db instances.
-incref(#db{} = Db) ->
-    couch_db_engine:incref(Db).
-
-clustered_db(DbName, Options) when is_list(Options) ->
-    UserCtx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-    SecProps = couch_util:get_value(security, Options, []),
-    Props = couch_util:get_value(props, Options, []),
-    {ok, #db{
-        name = DbName,
-        user_ctx = UserCtx,
-        security = SecProps,
-        options = [{props, Props}]
-    }};
-
-clustered_db(DbName, #user_ctx{} = UserCtx) ->
-    clustered_db(DbName, [{user_ctx, UserCtx}]).
-
-clustered_db(DbName, UserCtx, SecProps) ->
-    clustered_db(DbName, [{user_ctx, UserCtx}, {security, SecProps}]).
-
-is_db(#db{}) ->
-    true;
-is_db(_) ->
-    false.
-
-is_system_db(#db{options = Options}) ->
-    lists:member(sys_db, Options).
-
-is_clustered(#{}) ->
-    true;
-is_clustered(#db{main_pid = nil}) ->
-    true;
-is_clustered(#db{}) ->
-    false;
-is_clustered(?OLD_DB_REC = Db) ->
-    ?OLD_DB_MAIN_PID(Db) == undefined.
-
-is_partitioned(#db{options = Options}) ->
-    Props = couch_util:get_value(props, Options, []),
-    couch_util:get_value(partitioned, Props, false).
-
-close(#db{} = Db) ->
-    ok = couch_db_engine:decref(Db);
-close(?OLD_DB_REC) ->
-    ok.
-
-is_idle(#db{compactor_pid=nil} = Db) ->
-    monitored_by(Db) == [];
-is_idle(_Db) ->
-    false.
-
-monitored_by(Db) ->
-    case couch_db_engine:monitored_by(Db) of
-        Pids when is_list(Pids) ->
-            PidTracker = whereis(couch_stats_process_tracker),
-            Pids -- [Db#db.main_pid, PidTracker];
-        undefined ->
-            []
-    end.
-
-
-monitor(#db{main_pid=MainPid}) ->
-    erlang:monitor(process, MainPid).
-
-start_compact(#db{} = Db) ->
-    gen_server:call(Db#db.main_pid, start_compact).
-
-cancel_compact(#db{main_pid=Pid}) ->
-    gen_server:call(Pid, cancel_compact).
-
-wait_for_compaction(Db) ->
-    wait_for_compaction(Db, infinity).
-
-wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) ->
-    Start = os:timestamp(),
-    case gen_server:call(Pid, compactor_pid) of
-        CPid when is_pid(CPid) ->
-            Ref = erlang:monitor(process, CPid),
-            receive
-                {'DOWN', Ref, _, _, normal} when Timeout == infinity ->
-                    wait_for_compaction(Db, Timeout);
-                {'DOWN', Ref, _, _, normal} ->
-                    Elapsed = timer:now_diff(os:timestamp(), Start) div 1000,
-                    wait_for_compaction(Db, Timeout - Elapsed);
-                {'DOWN', Ref, _, _, Reason} ->
-                    {error, Reason}
-            after Timeout ->
-                erlang:demonitor(Ref, [flush]),
-                {error, Timeout}
-            end;
-        _ ->
-            ok
-    end.
-
-delete_doc(Db, Id, Revisions) ->
-    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
-    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
-    {ok, Result}.
-
-open_doc(Db, IdOrDocInfo) ->
-    open_doc(Db, IdOrDocInfo, []).
-
-open_doc(Db, Id, Options) ->
-    increment_stat(Db, [couchdb, database_reads]),
-    case open_doc_int(Db, Id, Options) of
-    {ok, #doc{deleted=true}=Doc} ->
-        case lists:member(deleted, Options) of
-        true ->
-            apply_open_options({ok, Doc},Options);
-        false ->
-            {not_found, deleted}
-        end;
-    Else ->
-        apply_open_options(Else,Options)
-    end.
-
-apply_open_options({ok, Doc},Options) ->
-    apply_open_options2(Doc,Options);
-apply_open_options(Else,_Options) ->
-    Else.
-
-apply_open_options2(Doc,[]) ->
-    {ok, Doc};
-apply_open_options2(#doc{atts=Atts0,revs=Revs}=Doc,
-        [{atts_since, PossibleAncestors}|Rest]) ->
-    RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
-    Atts = lists:map(fun(Att) ->
-        [AttPos, Data] = couch_att:fetch([revpos, data], Att),
-        if  AttPos > RevPos -> couch_att:store(data, Data, Att);
-            true -> couch_att:store(data, stub, Att)
-        end
-    end, Atts0),
-    apply_open_options2(Doc#doc{atts=Atts}, Rest);
-apply_open_options2(Doc, [ejson_body | Rest]) ->
-    apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc,[_|Rest]) ->
-    apply_open_options2(Doc,Rest).
-
-
-find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
-    0;
-find_ancestor_rev_pos(_DocRevs, []) ->
-    0;
-find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
-    case lists:member({RevPos, RevId}, AttsSinceRevs) of
-    true ->
-        RevPos;
-    false ->
-        find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
-    end.
-
-open_doc_revs(Db, Id, Revs, Options) ->
-    increment_stat(Db, [couchdb, database_reads]),
-    [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
-    {ok, [apply_open_options(Result, Options) || Result <- Results]}.
-
-% Each returned result is a list of tuples:
-% {Id, MissingRevs, PossibleAncestors}
-% if no revs are missing, it's omitted from the results.
-get_missing_revs(Db, IdRevsList) ->
-    Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
-    {ok, find_missing(IdRevsList, Results)}.
-
-find_missing([], []) ->
-    [];
-find_missing([{Id, Revs}|RestIdRevs], [FullInfo | RestLookupInfo])
-        when is_record(FullInfo, full_doc_info) ->
-    case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
-    [] ->
-        find_missing(RestIdRevs, RestLookupInfo);
-    MissingRevs ->
-        #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
-        LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
-        % Find the revs that are possible parents of this rev
-        PossibleAncestors =
-        lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
-            % this leaf is a "possible ancestor" of the missing
-            % revs if this LeafPos is less than any of the missing revs
-            case lists:any(fun({MissingPos, _}) ->
-                    LeafPos < MissingPos end, MissingRevs) of
-            true ->
-                [{LeafPos, LeafRevId} | Acc];
-            false ->
-                Acc
-            end
-        end, [], LeafRevs),
-        [{Id, MissingRevs, PossibleAncestors} |
-                find_missing(RestIdRevs, RestLookupInfo)]
-    end;
-find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
-    [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
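A sketch of get_missing_revs/2 in use, matching the description above (the ids,
revs and the Db handle are illustrative):

    %% Only entries with missing revs appear in the reply.
    {ok, Missing} = couch_db:get_missing_revs(Db, [
        {<<"doc-a">>, [{2, <<"deadbeef">>}]},   % rev unknown to Db -> reported
        {<<"doc-b">>, [{1, <<"cafef00d">>}]}    % rev already present -> omitted
    ]).
    %% Missing :: [{Id, MissingRevs, PossibleAncestors}]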
-
-get_doc_info(Db, Id) ->
-    case get_full_doc_info(Db, Id) of
-    #full_doc_info{} = FDI ->
-        {ok, couch_doc:to_doc_info(FDI)};
-    Else ->
-        Else
-    end.
-
-get_full_doc_info(Db, Id) ->
-    [Result] = get_full_doc_infos(Db, [Id]),
-    Result.
-
-get_full_doc_infos(Db, Ids) ->
-    couch_db_engine:open_docs(Db, Ids).
-
-purge_docs(Db, IdRevs) ->
-    purge_docs(Db, IdRevs, []).
-
--spec purge_docs(#db{}, [{UUId, Id, [Rev]}], [PurgeOption]) ->
-    {ok, [Reply]} when
-    UUId :: binary(),
-    Id :: binary() | list(),
-    Rev :: {non_neg_integer(), binary()},
-    PurgeOption :: interactive_edit | replicated_changes,
-    Reply :: {ok, []} | {ok, [Rev]}.
-purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
-    UUIDsIdsRevs2 = [{UUID, couch_util:to_binary(Id), Revs}
-        || {UUID, Id, Revs}  <- UUIDsIdsRevs],
-    % Check here if any UUIDs already exist when
-    % we're not replicating purge infos
-    IsRepl = lists:member(replicated_changes, Options),
-    if IsRepl -> ok; true ->
-        UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2],
-        lists:foreach(fun(Resp) ->
-            if Resp == not_found -> ok; true ->
-                Fmt = "Duplicate purge info UIUD: ~s",
-                Reason = io_lib:format(Fmt, [element(2, Resp)]),
-                throw({badreq, Reason})
-            end
-        end, get_purge_infos(Db, UUIDs))
-    end,
-    increment_stat(Db, [couchdb, database_purges]),
-    gen_server:call(Pid, {purge_docs, UUIDsIdsRevs2, Options}).
-
--spec get_purge_infos(#db{}, [UUId]) -> [PurgeInfo] when
-    UUId :: binary(),
-    PurgeInfo :: {PurgeSeq, UUId, Id, [Rev]} | not_found,
-    PurgeSeq :: non_neg_integer(),
-    Id :: binary(),
-    Rev :: {non_neg_integer(), binary()}.
-get_purge_infos(Db, UUIDs) ->
-    couch_db_engine:load_purge_infos(Db, UUIDs).
-
-
-get_minimum_purge_seq(#db{} = Db) ->
-    PurgeSeq = couch_db_engine:get_purge_seq(Db),
-    OldestPurgeSeq = couch_db_engine:get_oldest_purge_seq(Db),
-    PurgeInfosLimit = couch_db_engine:get_purge_infos_limit(Db),
-
-    FoldFun = fun(#doc{id = DocId, body = {Props}}, SeqAcc) ->
-        case DocId of
-            <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
-                ClientSeq = couch_util:get_value(<<"purge_seq">>, Props),
-                DbName = couch_db:name(Db),
-                % If there's a broken doc we have to keep every
-                % purge info until the doc is fixed or removed.
-                Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'",
-                case ClientSeq of
-                    CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit ->
-                        {ok, SeqAcc};
-                    CS when is_integer(CS) ->
-                        case purge_client_exists(DbName, DocId, Props) of
-                            true ->
-                                {ok, erlang:min(CS, SeqAcc)};
-                            false ->
-                                couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
-                                {ok, SeqAcc}
-                        end;
-                    _ ->
-                        couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
-                        {ok, erlang:min(OldestPurgeSeq, SeqAcc)}
-                end;
-            _ ->
-                {stop, SeqAcc}
-        end
-    end,
-    InitMinSeq = PurgeSeq - PurgeInfosLimit,
-    Opts = [
-        {start_key, list_to_binary(?LOCAL_DOC_PREFIX ++ "purge-")}
-    ],
-    {ok, MinIdxSeq} = couch_db:fold_local_docs(Db, FoldFun, InitMinSeq, Opts),
-    FinalSeq = case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
-        true -> MinIdxSeq;
-        false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
-    end,
-    % Log a warning if we've got a purge sequence exceeding the
-    % configured threshold.
-    if FinalSeq >= (PurgeSeq - PurgeInfosLimit) -> ok; true ->
-        Fmt = "The purge sequence for '~s' exceeds configured threshold",
-        couch_log:warning(Fmt, [couch_db:name(Db)])
-    end,
-    FinalSeq.
-
-
-purge_client_exists(DbName, DocId, Props) ->
-    % Warn about clients that have not updated their purge
-    % checkpoints in the last "index_lag_warn_seconds"
-    LagWindow = config:get_integer(
-            "purge", "index_lag_warn_seconds", 86400), % Default 24 hours
-
-    {Mega, Secs, _} = os:timestamp(),
-    NowSecs = Mega * 1000000 + Secs,
-    LagThreshold = NowSecs - LagWindow,
-
-    try
-        Exists = couch_db_plugin:is_valid_purge_client(DbName, Props),
-        if not Exists -> ok; true ->
-            Updated = couch_util:get_value(<<"updated_on">>, Props),
-            if is_integer(Updated) andalso Updated > LagThreshold -> ok; true ->
-                Diff = NowSecs - Updated,
-                Fmt1 = "Purge checkpoint '~s' not updated in ~p seconds "
-                    "in database ~p",
-                couch_log:error(Fmt1, [DocId, Diff, DbName])
-            end
-        end,
-        Exists
-    catch _:_ ->
-        % If we fail to check for a client we have to assume that
-        % it exists.
-        Fmt2 = "Failed to check purge checkpoint using
-            document '~p' in database ~p",
-        couch_log:error(Fmt2, [DocId, DbName]),
-        true
-    end.
-
-
-set_purge_infos_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_purge_infos_limit, Limit}, infinity);
-set_purge_infos_limit(_Db, _Limit) ->
-    throw(invalid_purge_infos_limit).
-
-
-get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
-    Fun.
-
-get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
-    Fun.
-
-get_committed_update_seq(#db{committed_update_seq=Seq}) ->
-    Seq.
-
-get_update_seq(#db{} = Db)->
-    couch_db_engine:get_update_seq(Db).
-
-get_user_ctx(#db{user_ctx = UserCtx}) ->
-    UserCtx;
-get_user_ctx(?OLD_DB_REC = Db) ->
-    ?OLD_DB_USER_CTX(Db).
-
-get_purge_seq(#db{}=Db) ->
-    couch_db_engine:get_purge_seq(Db).
-
-get_oldest_purge_seq(#db{}=Db) ->
-    couch_db_engine:get_oldest_purge_seq(Db).
-
-get_purge_infos_limit(#db{}=Db) ->
-    couch_db_engine:get_purge_infos_limit(Db).
-
-get_pid(#db{main_pid = Pid}) ->
-    Pid.
-
-get_del_doc_count(Db) ->
-    {ok, couch_db_engine:get_del_doc_count(Db)}.
-
-get_doc_count(Db) ->
-    {ok, couch_db_engine:get_doc_count(Db)}.
-
-get_uuid(#db{}=Db) ->
-    couch_db_engine:get_uuid(Db).
-
-get_epochs(#db{}=Db) ->
-    Epochs = couch_db_engine:get_epochs(Db),
-    validate_epochs(Epochs),
-    Epochs.
-
-get_filepath(#db{filepath = FilePath}) ->
-    FilePath.
-
-get_instance_start_time(#db{instance_start_time = IST}) ->
-    IST.
-
-get_compacted_seq(#db{}=Db) ->
-    couch_db_engine:get_compacted_seq(Db).
-
-get_compactor_pid(#db{compactor_pid = Pid}) ->
-    Pid.
-
-get_compactor_pid_sync(#db{main_pid=Pid}=Db) ->
-    case gen_server:call(Pid, compactor_pid, infinity) of
-        CPid when is_pid(CPid) ->
-            CPid;
-        _ ->
-            nil
-    end.
-
-get_db_info(Db) ->
-    #db{
-        name = Name,
-        compactor_pid = Compactor,
-        instance_start_time = StartTime,
-        committed_update_seq = CommittedUpdateSeq
-    } = Db,
-    {ok, DocCount} = get_doc_count(Db),
-    {ok, DelDocCount} = get_del_doc_count(Db),
-    SizeInfo = couch_db_engine:get_size_info(Db),
-    DiskVersion = couch_db_engine:get_disk_version(Db),
-    Uuid = case get_uuid(Db) of
-        undefined -> null;
-        Uuid0 -> Uuid0
-    end,
-    CompactedSeq = case get_compacted_seq(Db) of
-        undefined -> null;
-        Else1 -> Else1
-    end,
-    Props = case couch_db_engine:get_props(Db) of
-        undefined -> null;
-        Else2 -> {Else2}
-    end,
-    InfoList = [
-        {db_name, Name},
-        {engine, couch_db_engine:get_engine(Db)},
-        {doc_count, DocCount},
-        {doc_del_count, DelDocCount},
-        {update_seq, get_update_seq(Db)},
-        {purge_seq, couch_db_engine:get_purge_seq(Db)},
-        {compact_running, Compactor /= nil},
-        {sizes, {SizeInfo}},
-        {instance_start_time, StartTime},
-        {disk_format_version, DiskVersion},
-        {committed_update_seq, CommittedUpdateSeq},
-        {compacted_seq, CompactedSeq},
-        {props, Props},
-        {uuid, Uuid}
-    ],
-    {ok, InfoList}.
-
-get_partition_info(#db{} = Db, Partition) when is_binary(Partition) ->
-    Info = couch_db_engine:get_partition_info(Db, Partition),
-    {ok, Info};
-get_partition_info(_Db, _Partition) ->
-    throw({bad_request, <<"`partition` is not valid">>}).
-
-
-get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) ->
-    DDocId = couch_util:normalize_ddoc_id(DDocId0),
-    DbName = mem3:dbname(ShardDbName),
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(fabric:open_doc(DbName, DDocId, []))
-    end),
-    receive {'DOWN', Ref, _, _, Response} ->
-        Response
-    end;
-get_design_doc(#db{} = Db, DDocId0) ->
-    DDocId = couch_util:normalize_ddoc_id(DDocId0),
-    couch_db:open_doc_int(Db, DDocId, [ejson_body]).
-
-get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
-    DbName = mem3:dbname(ShardDbName),
-    {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
-    receive {'DOWN', Ref, _, _, Response} ->
-        Response
-    end;
-get_design_docs(#db{} = Db) ->
-    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
-    {ok, Docs} = fold_design_docs(Db, FoldFun, [], []),
-    {ok, lists:reverse(Docs)}.
-
-get_design_doc_count(#db{} = Db) ->
-    FoldFun = fun(_, Acc) -> {ok, Acc + 1} end,
-    fold_design_docs(Db, FoldFun, 0, []).
-
-check_is_admin(#db{user_ctx=UserCtx}=Db) ->
-    case is_admin(Db) of
-        true -> ok;
-        false ->
-            Reason = <<"You are not a db or server admin.">>,
-            throw_security_error(UserCtx, Reason)
-    end.
-
-check_is_member(#db{user_ctx=UserCtx}=Db) ->
-    case is_member(Db) of
-        true -> ok;
-        false -> throw_security_error(UserCtx)
-    end.
-
-is_admin(#db{user_ctx=UserCtx}=Db) ->
-    case couch_db_plugin:check_is_admin(Db) of
-        true -> true;
-        false ->
-            {Admins} = get_admins(Db),
-            is_authorized(UserCtx, Admins)
-    end.
-
-is_member(#db{user_ctx=UserCtx}=Db) ->
-    case is_admin(Db) of
-        true -> true;
-        false ->
-            case is_public_db(Db) of
-                true -> true;
-                false ->
-                    {Members} = get_members(Db),
-                    is_authorized(UserCtx, Members)
-            end
-    end.
-
-is_public_db(#db{}=Db) ->
-    {Members} = get_members(Db),
-    Names = couch_util:get_value(<<"names">>, Members, []),
-    Roles = couch_util:get_value(<<"roles">>, Members, []),
-    Names =:= [] andalso Roles =:= [].
-
-is_authorized(#user_ctx{name=UserName,roles=UserRoles}, Security) ->
-    Names = couch_util:get_value(<<"names">>, Security, []),
-    Roles = couch_util:get_value(<<"roles">>, Security, []),
-    case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
-        true -> true;
-        false -> check_security(names, UserName, Names)
-    end.
-
-check_security(roles, [], _) ->
-    false;
-check_security(roles, UserRoles, Roles) ->
-    UserRolesSet = ordsets:from_list(UserRoles),
-    RolesSet = ordsets:from_list(Roles),
-    not ordsets:is_disjoint(UserRolesSet, RolesSet);
-check_security(names, _, []) ->
-    false;
-check_security(names, null, _) ->
-    false;
-check_security(names, UserName, Names) ->
-    lists:member(UserName, Names).
-
-throw_security_error(#user_ctx{name=null}=UserCtx) ->
-    Reason = <<"You are not authorized to access this db.">>,
-    throw_security_error(UserCtx, Reason);
-throw_security_error(#user_ctx{name=_}=UserCtx) ->
-    Reason = <<"You are not allowed to access this db.">>,
-    throw_security_error(UserCtx, Reason).
-throw_security_error(#user_ctx{}=UserCtx, Reason) ->
-    Error = security_error_type(UserCtx),
-    throw({Error, Reason}).
-
-security_error_type(#user_ctx{name=null}) ->
-    unauthorized;
-security_error_type(#user_ctx{name=_}) ->
-    forbidden.
-
-
-get_admins(#db{security=SecProps}) ->
-    couch_util:get_value(<<"admins">>, SecProps, {[]}).
-
-get_members(#db{security=SecProps}) ->
-    % we fall back to readers here for backwards compatibility
-    couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})).
-
-get_security(#db{security=SecProps}) ->
-    {SecProps};
-get_security(?OLD_DB_REC = Db) ->
-    {?OLD_DB_SECURITY(Db)}.
-
-set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
-    check_is_admin(Db),
-    ok = validate_security_object(NewSecProps),
-    gen_server:call(Pid, {set_security, NewSecProps}, infinity);
-set_security(_, _) ->
-    throw(bad_request).
-
-set_user_ctx(#db{} = Db, UserCtx) ->
-    {ok, Db#db{user_ctx = UserCtx}}.
-
-validate_security_object(SecProps) ->
-    Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
-    % we fall back to readers here for backwards compatibility
-    Members = couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})),
-    ok = validate_names_and_roles(Admins),
-    ok = validate_names_and_roles(Members),
-    ok.
-
-% validate user input
-validate_names_and_roles({Props}) when is_list(Props) ->
-    case couch_util:get_value(<<"names">>, Props, []) of
-    Ns when is_list(Ns) ->
-            [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
-            Ns;
-    _ ->
-        throw("names must be a JSON list of strings")
-    end,
-    case couch_util:get_value(<<"roles">>, Props, []) of
-    Rs when is_list(Rs) ->
-        [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
-        Rs;
-    _ ->
-        throw("roles must be a JSON list of strings")
-    end,
-    ok;
-validate_names_and_roles(_) ->
-    throw("admins or members must be a JSON list of strings").
-
-get_revs_limit(#db{} = Db) ->
-    couch_db_engine:get_revs_limit(Db).
-
-set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
-set_revs_limit(_Db, _Limit) ->
-    throw(invalid_revs_limit).
-
-name(#db{name=Name}) ->
-    Name;
-name(?OLD_DB_REC = Db) ->
-    ?OLD_DB_NAME(Db).
-
-
-validate_docid(#db{} = Db, DocId) when is_binary(DocId) ->
-    couch_doc:validate_docid(DocId, name(Db)),
-    case is_partitioned(Db) of
-        true ->
-            couch_partition:validate_docid(DocId);
-        false ->
-            ok
-    end.
-
-
-doc_from_json_obj_validate(#db{} = Db, DocJson) ->
-    Doc = couch_doc:from_json_obj_validate(DocJson, name(Db)),
-    {Props} = DocJson,
-    case couch_util:get_value(<<"_id">>, Props) of
-        DocId when is_binary(DocId) ->
-            % Only validate the docid if it was provided
-            validate_docid(Db, DocId);
-        _ ->
-            ok
-    end,
-    Doc.
-
-
-update_doc(Db, Doc, Options) ->
-    update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(Db, Doc, Options, UpdateType) ->
-    case update_docs(Db, [Doc], Options, UpdateType) of
-    {ok, [{ok, NewRev}]} ->
-        {ok, NewRev};
-    {ok, [{{_Id, _Rev}, Error}]} ->
-        throw(Error);
-    {ok, [Error]} ->
-        throw(Error);
-    {ok, []} ->
-        % replication success
-        {Pos, [RevId | _]} = Doc#doc.revs,
-        {ok, {Pos, RevId}}
-    end.
-
-update_docs(Db, Docs) ->
-    update_docs(Db, Docs, []).
-
-% group_alike_docs groups the sorted documents into sublist buckets, by id.
-% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
-group_alike_docs(Docs) ->
-    % Here we're just asserting that our doc sort is stable so that
-    % if we have duplicate docids we don't have to worry about the
-    % behavior of lists:sort/2 which isn't documented anywhere as
-    % being stable.
-    WithPos = lists:zip(Docs, lists:seq(1, length(Docs))),
-    SortFun = fun({D1, P1}, {D2, P2}) -> {D1#doc.id, P1} =< {D2#doc.id, P2} end,
-    SortedDocs = [D || {D, _} <- lists:sort(SortFun, WithPos)],
-    group_alike_docs(SortedDocs, []).
-
-group_alike_docs([], Buckets) ->
-    lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc|Rest], []) ->
-    group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
-    [#doc{id=BucketId}|_] = Bucket,
-    case Doc#doc.id == BucketId of
-    true ->
-        % add to existing bucket
-        group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
-    false ->
-        % add to new bucket
-       group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
-    end.
-
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) ->
-    case catch check_is_admin(Db) of
-        ok -> validate_ddoc(Db, Doc);
-        Error -> Error
-    end;
-validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) ->
-    ValidationFuns = load_validation_funs(Db),
-    validate_doc_update(Db#db{validate_doc_funs=ValidationFuns}, Doc, Fun);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(Db, Doc, GetDiskDocFun) ->
-    case get(io_priority) of
-        {internal_repl, _} ->
-            ok;
-        _ ->
-            validate_doc_update_int(Db, Doc, GetDiskDocFun)
-    end.
-
-validate_ddoc(Db, DDoc) ->
-    try
-        ok = couch_mrview:validate(Db, couch_doc:with_ejson_body(DDoc))
-    catch
-        throw:{invalid_design_doc, Reason} ->
-            {bad_request, invalid_design_doc, Reason};
-        throw:{compilation_error, Reason} ->
-            {bad_request, compilation_error, Reason};
-        throw:Error ->
-            Error
-    end.
-
-validate_doc_update_int(Db, Doc, GetDiskDocFun) ->
-    Fun = fun() ->
-        DiskDoc = GetDiskDocFun(),
-        JsonCtx = couch_util:json_user_ctx(Db),
-        SecObj = get_security(Db),
-        try
-            [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
-                ok -> ok;
-                Error -> throw(Error)
-             end || Fun <- Db#db.validate_doc_funs],
-            ok
-        catch
-            throw:Error ->
-                Error
-        end
-    end,
-    couch_stats:update_histogram([couchdb, query_server, vdu_process_time],
-                                 Fun).
-
-
-% to be safe, spawn a middleman here
-load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, Funs}} ->
-            gen_server:cast(Pid, {load_validation_funs, Funs}),
-            Funs;
-        {'DOWN', Ref, _, _, {database_does_not_exist, _StackTrace}} ->
-            ok = couch_server:close_db_if_idle(Db#db.name),
-            erlang:error(database_does_not_exist);
-        {'DOWN', Ref, _, _, Reason} ->
-            couch_log:error("could not load validation funs ~p", [Reason]),
-            throw(internal_server_error)
-    end;
-load_validation_funs(#db{main_pid=Pid}=Db) ->
-    {ok, DDocInfos} = get_design_docs(Db),
-    OpenDocs = fun
-        (#full_doc_info{}=D) ->
-            {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
-            Doc
-    end,
-    DDocs = lists:map(OpenDocs, DDocInfos),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    gen_server:cast(Pid, {load_validation_funs, Funs}),
-    Funs.
-
-reload_validation_funs(#db{} = Db) ->
-    gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}).
-
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
-        OldFullDocInfo, LeafRevsDict, AllowConflict) ->
-    case Revs of
-    [PrevRev|_] ->
-        case dict:find({RevStart, PrevRev}, LeafRevsDict) of
-        {ok, {#leaf{deleted=Deleted, ptr=DiskSp}, DiskRevs}} ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
-                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
-            false ->
-                LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
-                {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
-            end;
-        error when AllowConflict ->
-            couch_doc:merge_stubs(Doc, #doc{}), % will generate error if
-                                                        % there are stubs
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        error ->
-            {conflict, Doc}
-        end;
-    [] ->
-        % new doc, and we have existing revs.
-        % reuse existing deleted doc
-        if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        true ->
-            {conflict, Doc}
-        end
-    end.
-
-
-
-prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
-        AccFatalErrors) ->
-    AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
-    {AccPrepped2, AccFatalErrors};
-prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    % no existing revs are known,
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-            false -> ok
-            end,
-            case Revs of
-            {0, []} ->
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[Doc | AccBucket], AccErrors2};
-                Error ->
-                    {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]}
-                end;
-            _ ->
-                % old revs specified but none exist, a conflict
-                {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3);
-prep_and_validate_updates(Db, [DocBucket|RestBuckets],
-        [#full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    Leafs = couch_key_tree:get_all_leafs(OldRevTree),
-    LeafRevsDict = dict:from_list([
-        {{Start, RevId}, {Leaf, Revs}} ||
-        {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
-    ]),
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun(Doc, {Docs2Acc, AccErrors2}) ->
-            case prep_and_validate_update(Db, Doc, OldFullDocInfo,
-                    LeafRevsDict, AllowConflict) of
-            {ok, Doc2} ->
-                {[Doc2 | Docs2Acc], AccErrors2};
-            {Error, _} ->
-                % Record the error
-                {Docs2Acc, [{doc_tag(Doc), Error} |AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3).
-
-
-update_docs(Db, Docs, Options) ->
-    update_docs(Db, Docs, Options, interactive_edit).
-
-
-prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
-    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
-            {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
-    AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
-    {AccPrepped2, lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
-    case OldInfo of
-    not_found ->
-        {ValidatedBucket, AccErrors3} = lists:foldl(
-            fun(Doc, {AccPrepped2, AccErrors2}) ->
-                case couch_doc:has_stubs(Doc) of
-                true ->
-                    couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-                false -> ok
-                end,
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[Doc | AccPrepped2], AccErrors2};
-                Error ->
-                    {AccPrepped2, [{Doc, Error} | AccErrors2]}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
-    #full_doc_info{rev_tree=OldTree} ->
-        OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
-        OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
-        NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
-        NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
-        Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
-        LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
-        {ValidatedBucket, AccErrors3} =
-        lists:foldl(
-            fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
-                IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
-                case dict:find({Pos, RevId}, LeafRevsFullDict) of
-                {ok, {Start, Path}} when not IsOldLeaf ->
-                    % our unflushed doc is a leaf node. Go back on the path
-                    % to find the previous rev that's on disk.
-
-                    LoadPrevRevFun = fun() ->
-                                make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
-                            end,
-
-                    case couch_doc:has_stubs(Doc) of
-                    true ->
-                        DiskDoc = case LoadPrevRevFun() of
-                            #doc{} = DiskDoc0 ->
-                                DiskDoc0;
-                            _ ->
-                                % Force a missing_stub exception
-                                couch_doc:merge_stubs(Doc, #doc{})
-                        end,
-                        Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                        GetDiskDocFun = fun() -> DiskDoc end;
-                    false ->
-                        Doc2 = Doc,
-                        GetDiskDocFun = LoadPrevRevFun
-                    end,
-
-                    case validate_doc_update(Db, Doc2, GetDiskDocFun) of
-                    ok ->
-                        {[Doc2 | AccValidated], AccErrors2};
-                    Error ->
-                        {AccValidated, [{Doc, Error} | AccErrors2]}
-                    end;
-                _ ->
-                    % this doc isn't a leaf or already exists in the tree.
-                    % ignore but consider it a success.
-                    {AccValidated, AccErrors2}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
-                [ValidatedBucket | AccPrepped], AccErrors3)
-    end.
-
-
-
-new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) ->
-    DigestedAtts = lists:foldl(fun(Att, Acc) ->
-        [N, T, M] = couch_att:fetch([name, type, md5], Att),
-        case M == <<>> of
-            true -> Acc;
-            false -> [{N, T, M} | Acc]
-        end
-    end, [], Atts),
-    case DigestedAtts of
-        Atts2 when length(Atts) =/= length(Atts2) ->
-            % We must have old style non-md5 attachments
-            ?l2b(integer_to_list(couch_util:rand32()));
-        Atts2 ->
-            OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
-            couch_hash:md5_hash(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]))
-    end.
-
-new_revs([], OutBuckets, IdRevsAcc) ->
-    {lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
-    {NewBucket, IdRevsAcc3} = lists:mapfoldl(
-        fun(#doc{revs={Start, RevIds}}=Doc, IdRevsAcc2)->
-        NewRevId = new_revid(Doc),
-        {Doc#doc{revs={Start+1, [NewRevId | RevIds]}},
-            [{doc_tag(Doc), {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
-    end, IdRevsAcc, Bucket),
-    new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts=Atts}=Doc) ->
-    lists:foldl(fun(Att, Names) ->
-        Name = couch_att:fetch(name, Att),
-        case ordsets:is_element(Name, Names) of
-            true -> throw({bad_request, <<"Duplicate attachments">>});
-            false -> ordsets:add_element(Name, Names)
-        end
-    end, ordsets:new(), Atts),
-    Doc.
-
-tag_docs([]) ->
-    [];
-tag_docs([#doc{meta=Meta}=Doc | Rest]) ->
-    [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-
-doc_tag(#doc{meta=Meta}) ->
-    case lists:keyfind(ref, 1, Meta) of
-        {ref, Ref} when is_reference(Ref) -> Ref;
-        false -> throw(doc_not_tagged);
-        Else -> throw({invalid_doc_tag, Else})
-    end.
-
-update_docs(Db, Docs0, Options, replicated_changes) ->
-    Docs = tag_docs(Docs0),
-
-    PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
-        prep_and_validate_replicated_updates(Db0, DocBuckets0,
-            ExistingDocInfos, [], [])
-    end,
-
-    {ok, DocBuckets, NonRepDocs, DocErrors}
-        = before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
-
-    DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc))
-            || Doc <- Bucket] || Bucket <- DocBuckets],
-    {ok, _} = write_and_commit(Db, DocBuckets2,
-        NonRepDocs, [merge_conflicts | Options]),
-    {ok, DocErrors};
-
-update_docs(Db, Docs0, Options, interactive_edit) ->
-    Docs = tag_docs(Docs0),
-
-    AllOrNothing = lists:member(all_or_nothing, Options),
-    PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
-        prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos,
-            AllOrNothing, [], [])
-    end,
-
-    {ok, DocBuckets, NonRepDocs, DocErrors}
-        = before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
-
-    if (AllOrNothing) and (DocErrors /= []) ->
-        RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
-        {aborted, lists:map(fun({Ref, Error}) ->
-            #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict),
-            case {Start, RevIds} of
-                {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
-                {0, []} -> {{Id, {0, <<>>}}, Error}
-            end
-        end, DocErrors)};
-    true ->
-        Options2 = if AllOrNothing -> [merge_conflicts];
-                true -> [] end ++ Options,
-        DocBuckets2 = [[
-                doc_flush_atts(Db, set_new_att_revpos(
-                        check_dup_atts(Doc)))
-                || Doc <- B] || B <- DocBuckets],
-        {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
-
-        {ok, CommitResults} = write_and_commit(Db, DocBuckets3,
-            NonRepDocs, Options2),
-
-        ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
-            dict:store(Key, Resp, ResultsAcc)
-        end, dict:from_list(IdRevs), CommitResults ++ DocErrors),
-        {ok, lists:map(fun(Doc) ->
-            dict:fetch(doc_tag(Doc), ResultsDict)
-        end, Docs)}
-    end.
-
-% Returns the first available document on disk. Input list is a full rev path
-% for the doc.
-make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
-    nil;
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted=IsDel, ptr=Sp}} |_]=DocPath) ->
-    Revs = [Rev || {Rev, _} <- DocPath],
-    make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
-
-collect_results_with_metrics(Pid, MRef, []) ->
-    Begin = os:timestamp(),
-    try
-        collect_results(Pid, MRef, [])
-    after
-        ResultsTime = timer:now_diff(os:timestamp(), Begin) div 1000,
-        couch_stats:update_histogram(
-            [couchdb, collect_results_time],
-            ResultsTime
-        )
-    end.
-
-collect_results(Pid, MRef, ResultsAcc) ->
-    receive
-    {result, Pid, Result} ->
-        collect_results(Pid, MRef, [Result | ResultsAcc]);
-    {done, Pid} ->
-        {ok, ResultsAcc};
-    {retry, Pid} ->
-        retry;
-    {'DOWN', MRef, _, _, Reason} ->
-        exit(Reason)
-    end.
-
-write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
-        NonRepDocs, Options) ->
-    DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
-    MergeConflicts = lists:member(merge_conflicts, Options),
-    MRef = erlang:monitor(process, Pid),
-    try
-        Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts},
-        case collect_results_with_metrics(Pid, MRef, []) of
-        {ok, Results} -> {ok, Results};
-        retry ->
-            % This can happen if the db file we wrote to was swapped out by
-            % compaction. Retry by reopening the db and writing to the current file
-            {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
-            DocBuckets2 = [
-                [doc_flush_atts(Db2, Doc) || Doc <- Bucket] ||
-                Bucket <- DocBuckets1
-            ],
-            % We only retry once
-            DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
-            close(Db2),
-            Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts},
-            case collect_results_with_metrics(Pid, MRef, []) of
-            {ok, Results} -> {ok, Results};
-            retry -> throw({update_error, compaction_retry})
-            end
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-
-prepare_doc_summaries(Db, BucketList) ->
-    [lists:map(
-        fun(#doc{body = Body, atts = Atts} = Doc0) ->
-            DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
-            {ok, SizeInfo} = couch_att:size_info(Atts),
-            AttsStream = case Atts of
-                [Att | _] ->
-                    {stream, StreamEngine} = couch_att:fetch(data, Att),
-                    StreamEngine;
-                [] ->
-                    nil
-            end,
-            Doc1 = Doc0#doc{
-                atts = DiskAtts,
-                meta = [
-                    {size_info, SizeInfo},
-                    {atts_stream, AttsStream},
-                    {ejson_size, couch_ejson_size:encoded_size(Body)}
-                ] ++ Doc0#doc.meta
-            },
-            couch_db_engine:serialize_doc(Db, Doc1)
-        end,
-        Bucket) || Bucket <- BucketList].
-
-
-before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) ->
-    increment_stat(Db, [couchdb, database_writes]),
-
-    % Separate _local docs from normal docs
-    IsLocal = fun
-        (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
-        (_) -> false
-    end,
-    {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
-
-    BucketList = group_alike_docs(Docs2),
-
-    DocBuckets = lists:map(fun(Bucket) ->
-        lists:map(fun(Doc) ->
-            DocWithBody = couch_doc:with_ejson_body(Doc),
-            couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
-        end, Bucket)
-    end, BucketList),
-
-    ValidatePred = fun
-        (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
-        (#doc{atts = Atts}) -> Atts /= []
-    end,
-
-    case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of
-        true ->
-            % look up the doc by id and get the most recent
-            Ids = [Id || [#doc{id = Id} | _] <- DocBuckets],
-            ExistingDocs = get_full_doc_infos(Db, Ids),
-            {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs),
-            % remove empty buckets
-            DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []],
-            {ok, DocBuckets3, NonRepDocs, DocErrors};
-        false ->
-            {ok, DocBuckets, NonRepDocs, []}
-    end.
-
-
-set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
-    Atts = lists:map(
-        fun(Att) ->
-            case couch_att:fetch(data, Att) of
-                % already committed to disk, don't set new rev
-                {stream, _} -> Att;
-                {Fd, _} when is_pid(Fd) -> Att;
-                % write required so update RevPos
-                _ -> couch_att:store(revpos, RevPos+1, Att)
-            end
-        end, Atts0),
-    Doc#doc{atts = Atts}.
-
-
-doc_flush_atts(Db, Doc) ->
-    Doc#doc{atts=[couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
-
-
-compressible_att_type(MimeType) when is_binary(MimeType) ->
-    compressible_att_type(?b2l(MimeType));
-compressible_att_type(MimeType) ->
-    TypeExpList = re:split(
-        config:get("attachments", "compressible_types", ""),
-        "\\s*,\\s*",
-        [{return, list}]
-    ),
-    lists:any(
-        fun(TypeExp) ->
-            Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
-                "(?:\\s*;.*?)?\\s*", $$],
-            re:run(MimeType, Regexp, [caseless]) =/= nomatch
-        end,
-        [T || T <- TypeExpList, T /= []]
-    ).
-
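
The compressible_types value read above is a comma separated list of glob-like MIME patterns; each '*' expands to '.*' and the match is case-insensitive and anchored. A minimal standalone sketch of the same matching idea, assuming a config value such as "text/*, application/json" (illustrative only, not necessarily the shipped default):

    %% Sketch only: mirrors the pattern expansion done by compressible_att_type/1.
    -module(compressible_demo).
    -export([matches/2]).

    %% MimeType is a string such as "text/plain; charset=utf-8".
    %% Patterns is a comma separated string such as "text/*, application/json".
    matches(MimeType, Patterns) ->
        Exprs = re:split(Patterns, "\\s*,\\s*", [{return, list}]),
        lists:any(fun(Expr) ->
            Regexp = ["^\\s*", re:replace(Expr, "\\*", ".*"), "(?:\\s*;.*?)?\\s*$"],
            re:run(MimeType, Regexp, [caseless]) =/= nomatch
        end, [E || E <- Exprs, E /= []]).

With those assumptions, matches("text/plain; charset=utf-8", "text/*, application/json") would return true while matches("image/png", "text/*") would not.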
-% From RFC 2616 3.6.1 - Chunked Transfer Coding
-%
-%   In other words, the origin server is willing to accept
-%   the possibility that the trailer fields might be silently
-%   discarded along the path to the client.
-%
-% I take this to mean that if "Trailers: Content-MD5\r\n"
-% is present in the request, but there is no Content-MD5
-% trailer, we're free to ignore this inconsistency and
-% pretend that no Content-MD5 exists.
-with_stream(Db, Att, Fun) ->
-    [InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
-    BufferSize = list_to_integer(
-        config:get("couchdb", "attachment_stream_buffer_size", "4096")),
-    Options = case (Enc =:= identity) andalso compressible_att_type(Type) of
-        true ->
-            CompLevel = list_to_integer(
-                config:get("attachments", "compression_level", "0")
-            ),
-            [
-                {buffer_size, BufferSize},
-                {encoding, gzip},
-                {compression_level, CompLevel}
-            ];
-        _ ->
-            [{buffer_size, BufferSize}]
-    end,
-    {ok, OutputStream} = open_write_stream(Db, Options),
-    ReqMd5 = case Fun(OutputStream) of
-        {md5, FooterMd5} ->
-            case InMd5 of
-                md5_in_footer -> FooterMd5;
-                _ -> InMd5
-            end;
-        _ ->
-            InMd5
-    end,
-    {StreamEngine, Len, IdentityLen, Md5, IdentityMd5} =
-        couch_stream:close(OutputStream),
-    couch_util:check_md5(IdentityMd5, ReqMd5),
-    {AttLen, DiskLen, NewEnc} = case Enc of
-    identity ->
-        case {Md5, IdentityMd5} of
-        {Same, Same} ->
-            {Len, IdentityLen, identity};
-        _ ->
-            {Len, IdentityLen, gzip}
-        end;
-    gzip ->
-        case couch_att:fetch([att_len, disk_len], Att) of
-            [AL, DL] when AL =:= undefined orelse DL =:= undefined ->
-                % Compressed attachment uploaded through the standalone API.
-                {Len, Len, gzip};
-            [AL, DL] ->
-                % This case is used for efficient push-replication, where a
-                % compressed attachment is located in the body of a multipart
-                % content-type request.
-                {AL, DL, gzip}
-        end
-    end,
-    couch_att:store([
-        {data, {stream, StreamEngine}},
-        {att_len, AttLen},
-        {disk_len, DiskLen},
-        {md5, Md5},
-        {encoding, NewEnc}
-    ], Att).
-
-
-open_write_stream(Db, Options) ->
-    couch_db_engine:open_write_stream(Db, Options).
-
-
-open_read_stream(Db, AttState) ->
-    couch_db_engine:open_read_stream(Db, AttState).
-
-
-is_active_stream(Db, StreamEngine) ->
-    couch_db_engine:is_active_stream(Db, StreamEngine).
-
-
-calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
-    Seq;
-calculate_start_seq(Db, Node, {Seq, Uuid}) ->
-    % Treat the current node as the epoch node
-    calculate_start_seq(Db, Node, {Seq, Uuid, Node});
-calculate_start_seq(Db, _Node, {Seq, {split, Uuid}, EpochNode}) ->
-    case is_owner(EpochNode, Seq, get_epochs(Db)) of
-        true ->
-            % Find last replicated sequence from split source to target
-            mem3_rep:find_split_target_seq(Db, EpochNode, Uuid, Seq);
-        false ->
-            couch_log:warning("~p calculate_start_seq not owner "
-                "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
-                [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]),
-            0
-    end;
-calculate_start_seq(Db, _Node, {Seq, Uuid, EpochNode}) ->
-    case is_prefix(Uuid, get_uuid(Db)) of
-        true ->
-            case is_owner(EpochNode, Seq, get_epochs(Db)) of
-                true -> Seq;
-                false ->
-                    couch_log:warning("~p calculate_start_seq not owner "
-                        "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
-                        [?MODULE, Db#db.name, Seq, Uuid, EpochNode,
-                            get_epochs(Db)]),
-                    0
-            end;
-        false ->
-            couch_log:warning("~p calculate_start_seq uuid prefix mismatch "
-                "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
-                [?MODULE, Db#db.name, Seq, Uuid, EpochNode]),
-            %% The file was rebuilt, most likely in a different
-            %% order, so rewind.
-            0
-    end;
-calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
-    case is_prefix(Uuid, couch_db:get_uuid(Db)) of
-        true ->
-            try
-                start_seq(get_epochs(Db), OriginalNode, Seq)
-            catch throw:epoch_mismatch ->
-                couch_log:warning("~p start_seq duplicate uuid on node: ~p "
-                    "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
-                    [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]),
-                0
-            end;
-        false ->
-            {replace, OriginalNode, Uuid, Seq}
-    end.
-
-
-validate_epochs(Epochs) ->
-    %% Assert uniqueness.
-    case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
-        true  -> ok;
-        false -> erlang:error(duplicate_epoch)
-    end,
-    %% Assert order.
-    case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
-        true  -> ok;
-        false -> erlang:error(epoch_order)
-    end.
-
-
-is_prefix(Pattern, Subject) ->
-    binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
-
-
-is_owner(Node, Seq, Epochs) ->
-    Node =:= owner_of(Epochs, Seq).
-
-
-owner_of(Db, Seq) when not is_list(Db) ->
-    owner_of(get_epochs(Db), Seq);
-owner_of([], _Seq) ->
-    undefined;
-owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq ->
-    EpochNode;
-owner_of([_ | Rest], Seq) ->
-    owner_of(Rest, Seq).
-
-
-start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq ->
-    %% OrigNode is the owner of the Seq so we can safely stream from there
-    Seq;
-start_seq([{_, NewSeq}, {OrigNode, _} | _], OrigNode, Seq) when Seq > NewSeq ->
-    %% We transferred this file before Seq was written on OrigNode, so we need
-    %% to stream from the beginning of the next epoch. Note that it is _not_
-    %% necessary for the current node to own the epoch beginning at NewSeq
-    NewSeq;
-start_seq([_ | Rest], OrigNode, Seq) ->
-    start_seq(Rest, OrigNode, Seq);
-start_seq([], _OrigNode, _Seq) ->
-    throw(epoch_mismatch).
-
-
-fold_docs(Db, UserFun, UserAcc) ->
-    fold_docs(Db, UserFun, UserAcc, []).
-
-fold_docs(Db, UserFun, UserAcc, Options) ->
-    couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_local_docs(Db, UserFun, UserAcc, Options) ->
-    couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_design_docs(Db, UserFun, UserAcc, Options1) ->
-    Options2 = set_design_doc_keys(Options1),
-    couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc) ->
-    fold_changes(Db, StartSeq, UserFun, UserAcc, []).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
-    couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts).
-
-
-fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) ->
-    fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []).
-
-
-fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts) ->
-    couch_db_engine:fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts).
-
-
-count_changes_since(Db, SinceSeq) ->
-    couch_db_engine:count_changes_since(Db, SinceSeq).
-
-
-%%% Internal function %%%
-open_doc_revs_int(Db, IdRevs, Options) ->
-    Ids = [Id || {Id, _Revs} <- IdRevs],
-    LookupResults = get_full_doc_infos(Db, Ids),
-    lists:zipwith(
-        fun({Id, Revs}, Lookup) ->
-            case Lookup of
-            #full_doc_info{rev_tree=RevTree} ->
-                {FoundRevs, MissingRevs} =
-                case Revs of
-                all ->
-                    {couch_key_tree:get_all_leafs(RevTree), []};
-                _ ->
-                    case lists:member(latest, Options) of
-                    true ->
-                        couch_key_tree:get_key_leafs(RevTree, Revs);
-                    false ->
-                        couch_key_tree:get(RevTree, Revs)
-                    end
-                end,
-                FoundResults =
-                lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
-                    case Value of
-                    ?REV_MISSING ->
-                        % we have the rev in our list but know nothing about it
-                        {{not_found, missing}, {Pos, Rev}};
-                    #leaf{deleted=IsDeleted, ptr=SummaryPtr} ->
-                        {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
-                    end
-                end, FoundRevs),
-                Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
-                {ok, Results};
-            not_found when Revs == all ->
-                {ok, []};
-            not_found ->
-                {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
-            end
-        end,
-        IdRevs, LookupResults).
-
-open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
-    case couch_db_engine:open_local_docs(Db, [Id]) of
-    [#doc{} = Doc] ->
-        apply_open_options({ok, Doc}, Options);
-    [not_found] ->
-        {not_found, missing}
-    end;
-open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
-    #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
-    Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
-    apply_open_options(
-       {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
-open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
-    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
-        DocInfo = couch_doc:to_doc_info(FullDocInfo),
-    {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
-    Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
-    apply_open_options(
-        {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
-open_doc_int(Db, Id, Options) ->
-    case get_full_doc_info(Db, Id) of
-    #full_doc_info{} = FullDocInfo ->
-        open_doc_int(Db, FullDocInfo, Options);
-    not_found ->
-        {not_found, missing}
-    end.
-
-doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
-    case lists:member(revs_info, Options) of
-    false -> [];
-    true ->
-        {[{Pos, RevPath}],[]} =
-            couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
-        [{revs_info, Pos, lists:map(
-            fun({Rev1, ?REV_MISSING}) ->
-                {Rev1, missing};
-            ({Rev1, Leaf}) ->
-                case Leaf#leaf.deleted of
-                true ->
-                    {Rev1, deleted};
-                false ->
-                    {Rev1, available}
-                end
-            end, RevPath)}]
-    end ++
-    case lists:member(conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
-        [] -> [];
-        ConflictRevs -> [{conflicts, ConflictRevs}]
-        end
-    end ++
-    case lists:member(deleted_conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
-        [] -> [];
-        DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
-        end
-    end ++
-    case lists:member(local_seq, Options) of
-    false -> [];
-    true -> [{local_seq, Seq}]
-    end.
-
-
-make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
-    #doc{
-        id = Id,
-        revs = RevisionPath,
-        body = [],
-        atts = [],
-        deleted = Deleted
-    };
-make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) ->
-    RevsLimit = get_revs_limit(Db),
-    Doc0 = couch_db_engine:read_doc_body(Db, #doc{
-        id = Id,
-        revs = {Pos, lists:sublist(Revs, 1, RevsLimit)},
-        body = Bp,
-        deleted = Deleted
-    }),
-    Doc1 = case Doc0#doc.atts of
-        BinAtts when is_binary(BinAtts) ->
-            Doc0#doc{
-                atts = couch_compress:decompress(BinAtts)
-            };
-        ListAtts when is_list(ListAtts) ->
-            Doc0
-    end,
-    after_doc_read(Db, Doc1#doc{
-        atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts]
-    }).
-
-
-after_doc_read(#db{} = Db, Doc) ->
-    DocWithBody = couch_doc:with_ejson_body(Doc),
-    couch_db_plugin:after_doc_read(Db, DocWithBody).
-
-increment_stat(#db{options = Options}, Stat) ->
-    case lists:member(sys_db, Options) of
-    true ->
-        ok;
-    false ->
-        couch_stats:increment_counter(Stat)
-    end.
-
--spec normalize_dbname(list() | binary()) -> binary().
-
-normalize_dbname(DbName) when is_list(DbName) ->
-    normalize_dbname(list_to_binary(DbName));
-normalize_dbname(DbName) when is_binary(DbName) ->
-    mem3:dbname(couch_util:drop_dot_couch_ext(DbName)).
-
-
--spec dbname_suffix(list() | binary()) -> binary().
-
-dbname_suffix(DbName) ->
-    filename:basename(normalize_dbname(DbName)).
-
-
-validate_dbname(DbName) when is_list(DbName) ->
-    validate_dbname(?l2b(DbName));
-validate_dbname(DbName) when is_binary(DbName) ->
-    Normalized = normalize_dbname(DbName),
-    couch_db_plugin:validate_dbname(
-        DbName, Normalized, fun validate_dbname_int/2).
-
-validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
-    DbNoExt = couch_util:drop_dot_couch_ext(DbName),
-    case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
-        match ->
-            ok;
-        nomatch ->
-            case is_system_db_name(Normalized) of
-                true -> ok;
-                false -> {error, {illegal_database_name, DbName}}
-            end
-    end.
-
-is_system_db_name(DbName) when is_list(DbName) ->
-    is_system_db_name(?l2b(DbName));
-is_system_db_name(DbName) when is_binary(DbName) ->
-    Normalized = normalize_dbname(DbName),
-    Suffix = filename:basename(Normalized),
-    case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
-        {<<".">>, Result} -> Result;
-        {_Prefix, false} -> false;
-        {Prefix, true} ->
-            ReOpts =  [{capture,none}, dollar_endonly],
-            re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
-    end.
-
-set_design_doc_keys(Options1) ->
-    Dir = case lists:keyfind(dir, 1, Options1) of
-        {dir, D0} -> D0;
-        _ -> fwd
-    end,
-    Options2 = set_design_doc_start_key(Options1, Dir),
-    set_design_doc_end_key(Options2, Dir).
-
-
--define(FIRST_DDOC_KEY, <<"_design/">>).
--define(LAST_DDOC_KEY, <<"_design0">>).
-
-
-set_design_doc_start_key(Options, fwd) ->
-    Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
-    Key2 = case Key1 < ?FIRST_DDOC_KEY of
-        true -> ?FIRST_DDOC_KEY;
-        false -> Key1
-    end,
-    lists:keystore(start_key, 1, Options, {start_key, Key2});
-set_design_doc_start_key(Options, rev) ->
-    Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
-    Key2 = case Key1 > ?LAST_DDOC_KEY of
-        true -> ?LAST_DDOC_KEY;
-        false -> Key1
-    end,
-    lists:keystore(start_key, 1, Options, {start_key, Key2}).
-
-
-set_design_doc_end_key(Options, fwd) ->
-    case couch_util:get_value(end_key_gt, Options) of
-        undefined ->
-            Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
-            Key2 = case Key1 > ?LAST_DDOC_KEY of
-                true -> ?LAST_DDOC_KEY;
-                false -> Key1
-            end,
-            lists:keystore(end_key, 1, Options, {end_key, Key2});
-        EKeyGT ->
-            Key2 = case EKeyGT > ?LAST_DDOC_KEY of
-                true -> ?LAST_DDOC_KEY;
-                false -> EKeyGT
-            end,
-            lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
-    end;
-set_design_doc_end_key(Options, rev) ->
-    case couch_util:get_value(end_key_gt, Options) of
-        undefined ->
-            Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
-            Key2 = case Key1 < ?FIRST_DDOC_KEY of
-                true -> ?FIRST_DDOC_KEY;
-                false -> Key1
-            end,
-            lists:keystore(end_key, 1, Options, {end_key, Key2});
-        EKeyGT ->
-            Key2 = case EKeyGT < ?FIRST_DDOC_KEY of
-                true -> ?FIRST_DDOC_KEY;
-                false -> EKeyGT
-            end,
-            lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
-    end.
-
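
The two helpers above clamp any caller supplied range into the <<"_design/">> .. <<"_design0">> window, so fold_design_docs/4 never leaves the design document id space. A hedged eunit-style sketch of the resulting option lists (worked out by hand against the defaults above; it would belong in the TEST section below where eunit is included):

    %% Sketch only: expected clamping behaviour of set_design_doc_keys/1.
    set_design_doc_keys_clamping_test() ->
        %% No keys supplied: the full design doc range is filled in.
        ?assertEqual(
            [{start_key, <<"_design/">>}, {end_key, <<"_design0">>}],
            set_design_doc_keys([])),
        %% A start_key below the range is raised to <<"_design/">>.
        ?assertEqual(
            [{start_key, <<"_design/">>}, {dir, fwd}, {end_key, <<"_design0">>}],
            set_design_doc_keys([{start_key, <<"_about">>}, {dir, fwd}])).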
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
-    ok = meck:new(couch_epi, [passthrough]),
-    ok = meck:expect(couch_epi, decide, fun(_, _, _, _, _) -> no_decision end),
-    ok.
-
-teardown_all(_) ->
-    meck:unload().
-
-setup() ->
-    meck:reset([couch_epi]).
-
-teardown(_) ->
-    ok.
-
-validate_dbname_success_test_() ->
-    Cases =
-        generate_cases_with_shards("long/co$mplex-/path+/something")
-        ++ generate_cases_with_shards("something")
-        ++ lists:append(
-            [generate_cases_with_shards(?b2l(SystemDb))
-                || SystemDb <- ?SYSTEM_DATABASES]),
-    {
-        setup,
-        fun setup_all/0,
-        fun teardown_all/1,
-        {
-            foreach,
-            fun setup/0,
-            fun teardown/1,
-            [should_pass_validate_dbname(A) || {_, A} <- Cases]
-        }
-    }.
-
-validate_dbname_fail_test_() ->
-    Cases = generate_cases("_long/co$mplex-/path+/_something")
-       ++ generate_cases("_something")
-       ++ generate_cases_with_shards("long/co$mplex-/path+/_something#")
-       ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing")
-       ++ generate_cases("!abcdefg/werwej/_users")
-       ++ generate_cases_with_shards("!abcdefg/werwej/_users"),
-    {
-        setup,
-        fun setup_all/0,
-        fun teardown_all/1,
-        {
-            foreach,
-            fun setup/0,
-            fun teardown/1,
-            [should_fail_validate_dbname(A) || {_, A} <- Cases]
-        }
-    }.
-
-normalize_dbname_test_() ->
-    Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
-       ++ generate_cases_with_shards("_something"),
-    WithExpected = [{?l2b(filename:rootname(A)), B} || {A, B} <- Cases],
-    [{test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))}
-        || {Expected, Db} <- WithExpected].
-
-dbname_suffix_test_() ->
-    Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
-       ++ generate_cases_with_shards("_something"),
-    WithExpected = [{?l2b(filename:basename(Arg)), Db} || {Arg, Db} <- Cases],
-    [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
-        || {Expected, Db} <- WithExpected].
-
-is_system_db_name_test_() ->
-    Cases = lists:append([
-        generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
-            || Db <- ?SYSTEM_DATABASES]
-        ++ [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES
-    ]),
-    WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db}
-        || {Arg, Db} <- Cases],
-    [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES",
-        ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected].
-
-should_pass_validate_dbname(DbName) ->
-    {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}.
-
-should_fail_validate_dbname(DbName) ->
-    {test_name(DbName), ?_test(begin
-        Result = validate_dbname(DbName),
-        ?assertMatch({error, {illegal_database_name, _}}, Result),
-        {error, {illegal_database_name, FailedDbName}} = Result,
-        ?assertEqual(to_binary(DbName), FailedDbName),
-        ok
-    end)}.
-
-calculate_start_seq_test_() ->
-    {
-        setup,
-        fun setup_start_seq_all/0,
-        fun teardown_start_seq_all/1,
-        {
-            foreach,
-            fun setup_start_seq/0,
-            fun teardown_start_seq/1,
-            [
-                t_calculate_start_seq_uuid_mismatch(),
-                t_calculate_start_seq_is_owner(),
-                t_calculate_start_seq_not_owner(),
-                t_calculate_start_seq_raw(),
-                t_calculate_start_seq_epoch_mismatch()
-            ]
-        }
-    }.
-
-setup_start_seq_all() ->
-    meck:new(couch_db_engine, [passthrough]),
-    meck:expect(couch_db_engine, get_uuid, fun(_) -> <<"foo">> end),
-    ok = meck:expect(couch_log, warning, 2, ok),
-    Epochs = [
-        {node2, 10},
-        {node1, 1}
-    ],
-    meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
-
-teardown_start_seq_all(_) ->
-    meck:unload().
-
-setup_start_seq() ->
-    meck:reset([
-        couch_db_engine,
-        couch_log
-    ]).
-
-teardown_start_seq(_) ->
-    ok.
-
-t_calculate_start_seq_uuid_mismatch() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node2, {15, <<"baz">>}),
-        ?assertEqual(0, Seq)
-    end).
-
-t_calculate_start_seq_is_owner() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node2, {15, <<"foo">>}),
-        ?assertEqual(15, Seq)
-    end).
-
-t_calculate_start_seq_not_owner() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node1, {15, <<"foo">>}),
-        ?assertEqual(0, Seq)
-    end).
-
-t_calculate_start_seq_raw() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node1, 13),
-        ?assertEqual(13, Seq)
-    end).
-
-t_calculate_start_seq_epoch_mismatch() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        SeqIn = {replace, not_this_node, get_uuid(Db), 42},
-        Seq = calculate_start_seq(Db, node1, SeqIn),
-        ?assertEqual(0, Seq)
-    end).
-
-is_owner_test() ->
-    ?assertNot(is_owner(foo, 1, [])),
-    ?assertNot(is_owner(foo, 1, [{foo, 1}])),
-    ?assert(is_owner(foo, 2, [{foo, 1}])),
-    ?assert(is_owner(foo, 50, [{bar, 100}, {foo, 1}])),
-    ?assert(is_owner(foo, 50, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assert(is_owner(bar, 150, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assertError(duplicate_epoch, validate_epochs([{foo, 1}, {bar, 1}])),
-    ?assertError(epoch_order, validate_epochs([{foo, 100}, {bar, 200}])).
-
-to_binary(DbName) when is_list(DbName) ->
-    ?l2b(DbName);
-to_binary(DbName) when is_binary(DbName) ->
-    DbName.
-
-test_name({Expected, DbName}) ->
-    lists:flatten(io_lib:format("~p -> ~p", [DbName, Expected]));
-test_name(DbName) ->
-    lists:flatten(io_lib:format("~p", [DbName])).
-
-generate_cases_with_shards(DbName) ->
-    DbNameWithShard = add_shard(DbName),
-    DbNameWithShardAndExtension = add_shard(DbName) ++ ".couch",
-    Cases = [
-        DbName, ?l2b(DbName),
-        DbNameWithShard, ?l2b(DbNameWithShard),
-        DbNameWithShardAndExtension, ?l2b(DbNameWithShardAndExtension)
-    ],
-    [{DbName, Case} || Case <- Cases].
-
-add_shard(DbName) ->
-    "shards/00000000-3fffffff/" ++ DbName ++ ".1415960794".
-
-generate_cases(DbName) ->
-    [{DbName, DbName}, {DbName, ?l2b(DbName)}].
-
--endif.
diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl
deleted file mode 100644
index 9adc992..0000000
--- a/src/couch/src/couch_db_engine.erl
+++ /dev/null
@@ -1,1105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_engine).
-
-
--include("couch_db.hrl").
--include("couch_db_int.hrl").
-
-
--type filepath() :: iolist().
--type docid() :: binary().
--type rev() :: {non_neg_integer(), binary()}.
--type revs() :: [rev()].
--type json() :: any().
--type uuid() :: binary().
--type purge_seq() :: non_neg_integer().
-
--type doc_pair() :: {
-        #full_doc_info{} | not_found,
-        #full_doc_info{} | not_found
-    }.
-
--type doc_pairs() :: [doc_pair()].
-
--type db_open_options() :: [
-        create
-    ].
-
--type delete_options() :: [
-        {context, delete | compaction} |
-        sync
-    ].
-
--type purge_info() :: {purge_seq(), uuid(), docid(), revs()}.
--type epochs() :: [{Node::atom(), UpdateSeq::non_neg_integer()}].
--type size_info() :: [{Name::atom(), Size::non_neg_integer()}].
--type partition_info() :: [
-    {partition, Partition::binary()} |
-    {doc_count, DocCount::non_neg_integer()} |
-    {doc_del_count, DocDelCount::non_neg_integer()} |
-    {sizes, size_info()}
-].
-
--type write_stream_options() :: [
-        {buffer_size, Size::pos_integer()} |
-        {encoding, atom()} |
-        {compression_level, non_neg_integer()}
-    ].
-
--type doc_fold_options() :: [
-        {start_key, Key::any()} |
-        {end_key, Key::any()} |
-        {end_key_gt, Key::any()} |
-        {dir, fwd | rev} |
-        include_reductions |
-        include_deleted
-    ].
-
--type changes_fold_options() :: [
-        {dir, fwd | rev}
-    ].
-
--type purge_fold_options() :: [
-        {start_key, Key::any()} |
-        {end_key, Key::any()} |
-        {end_key_gt, Key::any()} |
-        {dir, fwd | rev}
-    ].
-
--type db_handle() :: any().
-
--type doc_fold_fun() :: fun((#full_doc_info{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type local_doc_fold_fun() :: fun((#doc{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type changes_fold_fun() :: fun((#doc_info{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type purge_fold_fun() :: fun((purge_info(), UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
-
-% This is called by couch_server to determine which
-% engine should be used for the given database. DbPath
-% is calculated based on the DbName and the configured
-% extension for a given engine. The first engine to
-% return true is the engine that will be used for the
-% database.
--callback exists(DbPath::filepath()) -> boolean().
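
For orientation, a file backed engine typically implements exists/1 as a plain filesystem check. A hedged sketch (the ".compact" suffix handling is just an example of what a tail-append engine might also look for):

    %% Sketch only: exists/1 for a hypothetical file backed engine.
    exists(DbPath) ->
        filelib:is_file(DbPath) orelse filelib:is_file(DbPath ++ ".compact").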
-
-
-% This is called by couch_server to delete a database. It
-% is called from inside the couch_server process which
-% means that the storage engine does not have to guarantee
-% its own consistency checks when executing in this
-% context. However, since this is executed in the context
-% of couch_server it should return relatively quickly.
--callback delete(
-            RootDir::filepath(),
-            DbPath::filepath(),
-            DelOpts::delete_options()) ->
-        ok | {error, Reason::atom()}.
-
-
-% This function can be called from multiple contexts. It
-% will either be called just before a call to delete/3 above
-% or when a compaction is cancelled which executes in the
-% context of a couch_db_updater process. It is intended to
-% remove any temporary files used during compaction that
-% may be used to recover from a failed compaction swap.
--callback delete_compaction_files(
-            RootDir::filepath(),
-            DbPath::filepath(),
-            DelOpts::delete_options()) ->
-        ok.
-
-
-% This is called from the couch_db_updater:init/1 context. As
-% such this means that it is guaranteed to only have one process
-% executing for a given DbPath argument (ie, opening a given
-% database is guaranteed to only happen in a single process).
-% However, multiple processes may be trying to open different
-% databases concurrently so if a database requires a shared
-% resource that will require concurrency control at the storage
-% engine layer.
-%
-% The returned DbHandle should be a term that can be freely
-% copied between processes and accessed concurrently. However
-% it's guaranteed that the handle will only ever be mutated
-% in a single threaded context (ie, within the couch_db_updater
-% process).
--callback init(DbPath::filepath(), db_open_options()) ->
-    {ok, DbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:terminate/2
-% and as such has the same properties as init/2. It's guaranteed
-% to be consistent for a given database but may be called by many
-% databases concurrently.
--callback terminate(Reason::any(), DbHandle::db_handle()) -> Ignored::any().
-
-
-% This is called in the context of couch_db_updater:handle_call/3
-% for any message that is unknown. It can be used to handle messages
-% from asynchronous processes like the engine's compactor if it has one.
--callback handle_db_updater_call(Msg::any(), DbHandle::db_handle()) ->
-        {reply, Resp::any(), NewDbHandle::db_handle()} |
-        {stop, Reason::any(), Resp::any(), NewDbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and has the same properties as handle_call/3.
--callback handle_db_updater_info(Msg::any(), DbHandle::db_handle()) ->
-    {noreply, NewDbHandle::db_handle()} |
-    {noreply, NewDbHandle::db_handle(), Timeout::timeout()} |
-    {stop, Reason::any(), NewDbHandle::db_handle()}.
-
-
-% These functions are called by any process opening or closing
-% a database. As such they need to be able to handle being
-% called concurrently. For example, the legacy engine uses these
-% to add monitors to the main engine process.
--callback incref(DbHandle::db_handle()) -> {ok, NewDbHandle::db_handle()}.
--callback decref(DbHandle::db_handle()) -> ok.
--callback monitored_by(DbHande::db_handle()) -> [pid()].
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and should return the timestamp of the last activity of
-% the database. If a storage engine has no notion of activity, or the
-% value would be hard to report, it's ok to just return the
-% result of os:timestamp/0, as this will just prevent idle
-% databases from closing automatically.
--callback last_activity(DbHandle::db_handle()) -> erlang:timestamp().
-
-
-% All of the get_* functions may be called from many
-% processes concurrently.
-
-% The database should make a note of the update sequence when it
-% was last compacted. If the database doesn't need compacting it
-% can just hard code a return value of 0.
--callback get_compacted_seq(DbHandle::db_handle()) ->
-            CompactedSeq::non_neg_integer().
-
-
-% The number of documents in the database which have all leaf
-% revisions marked as deleted.
--callback get_del_doc_count(DbHandle::db_handle()) ->
-            DelDocCount::non_neg_integer().
-
-
-% This number is reported in the database info properties and
-% as such can be any JSON value.
--callback get_disk_version(DbHandle::db_handle()) -> Version::json().
-
-
-% The number of documents in the database that have one or more
-% leaf revisions not marked as deleted.
--callback get_doc_count(DbHandle::db_handle()) -> DocCount::non_neg_integer().
-
-
-% The epochs track which node owned the database starting at
-% a given update sequence. Each time a database is opened it
-% should look at the epochs. If the most recent entry is not
-% for the current node it should add an entry that will be
-% written the next time a write is performed. An entry is
-% simply a {node(), CurrentUpdateSeq} tuple.
--callback get_epochs(DbHandle::db_handle()) -> Epochs::epochs().
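
To make that concrete, here is a hedged example of an epochs value with the newest entry first (node names and sequences invented). With this value, owner_of/2 in couch_db.erl, shown earlier in this diff, resolves sequence 15 to node2 and sequence 5 to node1.

    %% Sketch only: a plausible epochs() value for a shard that moved once.
    example_epochs() ->
        [{'node2@127.0.0.1', 10},   % node2 owns sequences above 10
         {'node1@127.0.0.1', 1}].   % node1 owned sequences 2..10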
-
-
-% Get the current purge sequence known to the engine. This
-% value should be updated during calls to purge_docs.
--callback get_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the oldest purge sequence known to the engine
--callback get_oldest_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the purge infos limit. This should just return the last
-% value that was passed to set_purge_infos_limit/2.
--callback get_purge_infos_limit(DbHandle::db_handle()) -> pos_integer().
-
-
-% Get the revision limit. This should just return the last
-% value that was passed to set_revs_limit/2.
--callback get_revs_limit(DbHandle::db_handle()) -> RevsLimit::pos_integer().
-
-
-% Get the current security properties. This should just return
-% the last value that was passed to set_security/2.
--callback get_security(DbHandle::db_handle()) -> SecProps::any().
-
-
-% Get the current properties.
--callback get_props(DbHandle::db_handle()) -> Props::[any()].
-
-
-% This information is displayed in the database info properties. It
-% should just be a list of {Name::atom(), Size::non_neg_integer()}
-% tuples that will then be combined across shards. Currently,
-% various modules expect there to at least be values for:
-%
-%   file     - Number of bytes on disk
-%
-%   active   - Theoretical minimum number of bytes to store this db on disk
-%              which is used to guide decisions on compaction
-%
-%   external - Number of bytes that would be required to represent the
-%              contents outside of the database (for capacity and backup
-%              planning)
--callback get_size_info(DbHandle::db_handle()) -> SizeInfo::size_info().
-
-
-% This returns the information for the given partition.
-% It should be a list of 2-tuples giving the partition name, doc count,
-% deleted doc count and two sizes:
-%
-%   active   - Theoretical minimum number of bytes to store this partition on disk
-%
-%   external - Number of bytes that would be required to represent the
-%              contents of this partition outside of the database
--callback get_partition_info(DbHandle::db_handle(), Partition::binary()) ->
-    partition_info().
-
-
-% The current update sequence of the database. The update
-% sequence should be incremented for every revision added to
-% the database.
--callback get_update_seq(DbHandle::db_handle()) -> UpdateSeq::non_neg_integer().
-
-
-% Whenever a database is created it should generate a
-% persistent UUID for identification in case the shard should
-% ever need to be moved between nodes in a cluster.
--callback get_uuid(DbHandle::db_handle()) -> UUID::binary().
-
-
-% These functions are only called by couch_db_updater and
-% as such are guaranteed to be single threaded calls. The
-% database should simply store these values somewhere so
-% they can be returned by the corresponding get_* calls.
-
--callback set_revs_limit(DbHandle::db_handle(), RevsLimit::pos_integer()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
--callback set_purge_infos_limit(DbHandle::db_handle(), Limit::pos_integer()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
--callback set_security(DbHandle::db_handle(), SecProps::any()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is only called by couch_db_updater and
-% as such is guaranteed to be a single threaded call. The
-% database should simply store the provided property list
-% unaltered.
-
--callback set_props(DbHandle::db_handle(), Props::any()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% Set the current update sequence of the database. The intention is to use this
-% when copying a database such that the destination update sequence should
-% match exactly the source update sequence.
--callback set_update_seq(
-    DbHandle::db_handle(),
-    UpdateSeq::non_neg_integer()) ->
-    {ok, NewDbHandle::db_handle()}.
-
-
-% This function will be called by many processes concurrently.
-% It should return a #full_doc_info{} record or not_found for
-% every provided DocId in the order those DocIds appear in
-% the input.
-%
-% Traditionally this function will only return documents that
-% were present in the database when the DbHandle was retrieved
-% from couch_server. It is currently unknown what would break
-% if a storage engine deviated from that property.
--callback open_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
-        [#full_doc_info{} | not_found].
-
-
-% This function will be called by many processes concurrently.
-% It should return a #doc{} record or not_found for every
-% provided DocId in the order they appear in the input.
-%
-% The same caveats around database snapshots from open_docs
-% apply to this function (although this function is called
-% rather less frequently so it may not be as big of an
-% issue).
--callback open_local_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
-        [#doc{} | not_found].
-
-
-% This function will be called from many contexts concurrently.
-% The provided RawDoc is a #doc{} record that has its body
-% value set to the body value returned from write_doc_body/2.
-%
-% This API exists so that storage engines can store document
-% bodies externally from the #full_doc_info{} record (which
-% is the traditional approach and is recommended).
--callback read_doc_body(DbHandle::db_handle(), RawDoc::doc()) ->
-        doc().
-
-
-% This function will be called from many contexts concurrently.
-% If the storage engine has a purge_info() record for any of the
-% provided UUIDs, those purge_info() records should be returned. The
-% resulting list should have the same length as the input list of
-% UUIDs.
--callback load_purge_infos(DbHandle::db_handle(), [uuid()]) ->
-        [purge_info() | not_found].
-
-
-% This function is called concurrently by any client process
-% that is writing a document. It should accept a #doc{}
-% record and return a #doc{} record with a mutated body it
-% wishes to have written to disk by write_doc_body/2.
-%
-% This API exists so that storage engines can compress
-% document bodies in parallel by client processes rather
-% than forcing all compression to occur single threaded
-% in the context of the couch_db_updater process.
--callback serialize_doc(DbHandle::db_handle(), Doc::doc()) ->
-        doc().
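
Because serialize_doc/2 runs in the calling client process, an engine can push the expensive body encoding out of the couch_db_updater. A hedged sketch, not the legacy engine's actual implementation:

    %% Sketch only: encode and compress the EJSON body in the caller, so the
    %% single threaded updater later just appends the resulting binary.
    serialize_doc(_DbHandle, #doc{body = Body} = Doc) ->
        Doc#doc{body = term_to_binary(Body, [{minor_version, 1}, {compressed, 6}])}.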
-
-
-% This function is called in the context of a couch_db_updater
-% which means its single threaded for the given DbHandle.
-%
-% The returned #doc{} record should have its Body set to a value
-% that will be stored in the #full_doc_info{} record's revision
-% tree leaves which is passed to read_doc_body/2 above when
-% a client wishes to read a document.
-%
-% The BytesWritten return value is used to determine the number
-% of active bytes in the database, which is used to determine
-% when to compact this database.
--callback write_doc_body(DbHandle::db_handle(), Doc::doc()) ->
-        {ok, FlushedDoc::doc(), BytesWritten::non_neg_integer()}.
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% This is probably the most complicated function in the entire
-% API due to a few subtle behavior requirements required by
-% CouchDB's storage model.
-%
-% The Pairs argument is a list of pairs (2-tuples) of
-% #full_doc_info{} records. The first element of the pair is
-% the #full_doc_info{} that exists on disk. The second element
-% is the new version that should be written to disk. There are
-% two basic cases that should be followed:
-%
-%     1. {not_found, #full_doc_info{}} - A new document was created
-%     2. {#full_doc_info{}, #full_doc_info{}} - A document was updated
-%
-% The cases are fairly straightforward as long as moving entries
-% in the update sequence is properly accounted for.
-%
-% The LocalDocs variable is applied separately. It's important to
-% note for new storage engine authors that these documents are
-% separate because they should *not* be included as part of the
-% changes index for the database.
-%
-% Traditionally an invocation of write_doc_infos should be all
-% or nothing, in that if an error occurs (or the VM dies)
-% then the database doesn't retain any of the changes. However
-% as long as a storage engine maintains consistency this should
-% not be an issue as it has never been a guarantee and the
-% batches are non-deterministic (from the point of view of the
-% client).
--callback write_doc_infos(
-    DbHandle::db_handle(),
-    Pairs::doc_pairs(),
-    LocalDocs::[#doc{}]) ->
-        {ok, NewDbHandle::db_handle()}.
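
A hedged sketch of walking the Pairs argument, only to make the two shapes above concrete (the classify_pairs/1 helper is invented for the example):

    %% Sketch only: distinguish created vs updated documents in a Pairs list.
    classify_pairs(Pairs) ->
        lists:map(fun
            ({not_found, #full_doc_info{id = Id}}) -> {created, Id};
            ({#full_doc_info{}, #full_doc_info{id = Id}}) -> {updated, Id}
        end, Pairs).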
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% Each doc_pair() is a 2-tuple of #full_doc_info{} records. The
-% first element of the pair is the #full_doc_info{} that exists
-% on disk. The second element is the new version that should be
-% written to disk. There are three basic cases that should be considered:
-%
-%     1. {#full_doc_info{}, #full_doc_info{}} - A document was partially purged
-%     2. {#full_doc_info{}, not_found} - A document was completely purged
-%     3. {not_found, not_found} - A no-op purge
-%
-% In case 1, non-tail-append engines may have to remove revisions
-% specifically rather than rely on compaction to remove them. Also
-% note that the new #full_doc_info{} will have a different update_seq
-% that will need to be reflected in the changes feed.
-%
-% In case 2 the document is purged completely, which
-% means it needs to be removed from the database, including the
-% update sequence.
-%
-% In case 3 we just need to store the purge_info() to know that it
-% was processed even though it produced no changes to the database.
-%
-% The purge_info() tuples contain the purge_seq, uuid, docid and
-% revisions that were requested to be purged. This should be persisted
-% in such a way that we can efficiently load purge_info() by its UUID
-% as well as iterate over purge_info() entries in order of their PurgeSeq.
--callback purge_docs(DbHandle::db_handle(), [doc_pair()], [purge_info()]) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function should be called from a single threaded context and
-% should be used to copy purge infos from one database to another
-% when copying a database.
--callback copy_purge_infos(DbHandle::db_handle(), [purge_info()]) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called in the context of couch_db_updater and
-% as such is single threaded for any given DbHandle.
-%
-% This call is made periodically to ensure that the database has
-% stored all updates on stable storage. (ie, here is where you fsync).
--callback commit_data(DbHandle::db_handle()) ->
-        {ok, NewDbHande::db_handle()}.
-
-
-% This function is called by multiple processes concurrently.
-%
-% This function along with open_read_stream are part of the
-% attachments API. For the time being I'm leaving these mostly
-% undocumented. There are implementations of this in both the
-% legacy btree engine as well as the alternative engine
-% implementations for the curious, however this is a part of the
-% API for which I'd like feedback.
-%
-% Currently an engine can elect to not implement these API's
-% by throwing the atom not_supported.
--callback open_write_stream(
-    DbHandle::db_handle(),
-    Options::write_stream_options()) ->
-        {ok, pid()}.
-
-
-% See the documentation for open_write_stream
--callback open_read_stream(DbHandle::db_handle(), StreamDiskInfo::any()) ->
-        {ok, {Module::atom(), ReadStreamState::any()}}.
-
-
-% See the documentation for open_write_stream
--callback is_active_stream(DbHandle::db_handle(), ReadStreamState::any()) ->
-        boolean().
-
-
-% This function is called by many processes concurrently.
-%
-% This function is called to fold over the documents in
-% the database sorted by the raw byte collation order of
-% the document id. For each document id, the supplied user
-% function should be invoked with the first argument set
-% to the #full_doc_info{} record and the second argument
-% set to the current user supplied accumulator. The return
-% value of the user function is a 2-tuple of {Go, NewUserAcc}.
-% The NewUserAcc value should then replace the current
-% user accumulator. If Go is the atom ok, iteration over
-% documents should continue. If Go is the atom stop, then
-% iteration should halt and the return value should be
-% {ok, NewUserAcc}.
-%
-% Possible options to this function include:
-%
-%     1. start_key - Start iteration at the provided key or
-%        just after it if the key doesn't exist
-%     2. end_key - Stop iteration just after the provided key
-%     3. end_key_gt - Stop iteration prior to visiting the provided
-%        key
-%     4. dir - The atom fwd or rev. This is to be able to iterate
-%        over documents in reverse order. The logic for comparing
-%        start_key, end_key, and end_key_gt is then reversed (ie,
-%        when rev, start_key should be greater than end_key if the
-%        user wishes to see results)
-%     5. include_reductions - This is a hack for _all_docs since
-%        it currently relies on reductions to count an offset. This
-%        is a terrible hack that will need to be addressed by the
-%        API in the future. If this option is present the supplied
-%        user function expects three arguments, where the first
-%        argument is a #full_doc_info{} record, the second argument
-%        is the current list of reductions to the left of the current
-%        document, and the third argument is the current user
-%        accumulator. The return value from the user function is
-%        unaffected. However the final return value of the function
-%        should include the final total reductions as the second
-%        element of a 3-tuple. Like I said, this is a hack.
-%     6. include_deleted - By default deleted documents are not
-%        included in fold_docs calls. However in some special
-%        cases we do want to see them (as of now, just in couch_changes
-%        during the design document changes optimization)
-%
-% Historically, if a process calls this function repeatedly it
-% would see the same results returned even if there were concurrent
-% updates happening. However there doesn't seem to be any instance of
-% that actually happening so a storage engine that includes new results
-% between invocations shouldn't have any issues.
--callback fold_docs(
-    DbHandle::db_handle(),
-    UserFold::doc_fold_fun(),
-    UserAcc::any(),
-    doc_fold_options()) ->
-        {ok, LastUserAcc::any()}.
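
To make the fold contract concrete, here is a hedged caller-side sketch that counts documents via the couch_db_engine:fold_docs/4 wrapper exported at the bottom of this module (count_live_docs/1 is invented for the example):

    %% Sketch only: fold over the id index, accumulating a count of documents
    %% whose winning revision is not deleted.
    count_live_docs(Db) ->
        FoldFun = fun(#full_doc_info{deleted = Deleted}, Acc) ->
            case Deleted of
                true -> {ok, Acc};
                false -> {ok, Acc + 1}
            end
        end,
        {ok, Count} = couch_db_engine:fold_docs(Db, FoldFun, 0, []),
        Count.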
-
-
-% This function may be called by many processes concurrently.
-%
-% This should behave exactly the same as fold_docs/4 except that it
-% should only return local documents and the first argument to the
-% user function is a #doc{} record, not a #full_doc_info{}.
--callback fold_local_docs(
-    DbHandle::db_handle(),
-    UserFold::local_doc_fold_fun(),
-    UserAcc::any(),
-    doc_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over the documents (not local
-% documents) in order of their most recent update. Each document
-% in the database should have exactly one entry in this sequence.
-% If a document is updated during a call to this function it should
-% not be included twice as that will probably lead to Very Bad Things.
-%
-% This should behave similarly to fold_docs/4 in that the supplied
-% user function should be invoked with a #full_doc_info{} record
-% as the first argument and the current user accumulator as the
-% second argument. The same semantics for the return value from the
-% user function should be handled as in fold_docs/4.
-%
-% The StartSeq parameter indicates where the fold should start
-% *after*. As in, if a change with a value of StartSeq exists in the
-% database it should not be included in the fold.
-%
-% The only option currently supported by the API is the `dir`
-% option that should behave the same as for fold_docs.
--callback fold_changes(
-    DbHandle::db_handle(),
-    StartSeq::non_neg_integer(),
-    UserFold::changes_fold_fun(),
-    UserAcc::any(),
-    changes_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over purge requests in order of
-% their oldest purge (increasing purge_seq order)
-%
-% The StartPurgeSeq parameter indicates where the fold should start *after*.
--callback fold_purge_infos(
-    DbHandle::db_handle(),
-    StartPurgeSeq::purge_seq(),
-    UserFold::purge_fold_fun(),
-    UserAcc::any(),
-    purge_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to count the number of documents changed
-% since the given UpdateSeq (ie, not including the possible change
-% at exactly UpdateSeq). It is currently only used internally to
-% provide a status update in a replication's _active_tasks entry
-% to indicate how many documents are left to be processed.
-%
-% This is a fairly difficult thing to support in engines that don't
-% behave exactly like a tree with efficient support for counting rows
-% between keys. As such, returning 0 or even just the difference from
-% the current update sequence is possibly the best some storage engines
-% can provide. This may lead to some confusion when interpreting the
-% _active_tasks entry if the storage engine isn't accounted for by the
-% client.
--callback count_changes_since(
-    DbHandle::db_handle(),
-    UpdateSeq::non_neg_integer()) ->
-        TotalChanges::non_neg_integer().
-
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% If a storage engine requires compaction this is a trigger to start
-% it off. However a storage engine can do whatever it wants here. As
-% this is fairly engine specific there's not a lot of guidance that is
-% generally applicable.
-%
-% When compaction is finished the compactor should use
-% gen_server:cast/2 to send a {compact_done, CompactEngine, CompactInfo}
-% message to the Parent pid provided. Currently CompactEngine
-% must be the same engine that started the compaction and CompactInfo
-% is an arbitrary term that's passed to finish_compaction/4.
--callback start_compaction(
-    DbHandle::db_handle(),
-    DbName::binary(),
-    Options::db_open_options(),
-    Parent::pid()) ->
-        {ok, NewDbHandle::db_handle(), CompactorPid::pid()}.
-
-
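A minimal sketch of an engine honoring this contract; compact_to_new_file/1
is a stand-in for the engine-specific work, and the compactor reports back
with the {compact_done, Engine, CompactInfo} cast described above.

    start_compaction(St, _DbName, _Options, Parent) ->
        Pid = spawn_link(fun() ->
            %% Engine-specific compaction work (hypothetical helper).
            CompactInfo = compact_to_new_file(St),
            gen_server:cast(Parent, {compact_done, ?MODULE, CompactInfo})
        end),
        {ok, St, Pid}.
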
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% Same as for start_compaction, this will be extremely specific to
-% any given storage engine.
-%
-% The API is split in two so that, if the storage engine needs to
-% update the DbHandle state held by couch_db_updater, it can do so:
-% finish_compaction/4 is called in the context of the couch_db_updater.
--callback finish_compaction(
-    OldDbHandle::db_handle(),
-    DbName::binary(),
-    Options::db_open_options(),
-    CompactInfo::any()) ->
-        {ok, CompactedDbHandle::db_handle(), CompactorPid::pid() | undefined}.
-
-
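Continuing that sketch, finish_compaction/4 runs inside couch_db_updater
and can therefore swap the handle in place; swap_compacted_file/2 is
hypothetical, and returning undefined for the compactor pid signals that
no further compaction round is needed.

    finish_compaction(OldSt, _DbName, _Options, CompactInfo) ->
        %% Swap in the compacted file produced by the compactor process.
        NewSt = swap_compacted_file(OldSt, CompactInfo),
        {ok, NewSt, undefined}.
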
--export([
-    exists/2,
-    delete/4,
-    delete_compaction_files/4,
-
-    init/3,
-    terminate/2,
-    handle_db_updater_call/3,
-    handle_db_updater_info/2,
-
-    incref/1,
-    decref/1,
-    monitored_by/1,
-
-    last_activity/1,
-
-    get_engine/1,
-    get_compacted_seq/1,
-    get_del_doc_count/1,
-    get_disk_version/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_props/1,
-    get_size_info/1,
-    get_partition_info/2,
-    get_update_seq/1,
-    get_uuid/1,
-
-    set_revs_limit/2,
-    set_security/2,
-    set_purge_infos_limit/2,
-    set_props/2,
-
-    set_update_seq/2,
-
-    open_docs/2,
-    open_local_docs/2,
-    read_doc_body/2,
-    load_purge_infos/2,
-
-    serialize_doc/2,
-    write_doc_body/2,
-    write_doc_infos/3,
-    purge_docs/3,
-    copy_purge_infos/2,
-    commit_data/1,
-
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_changes/5,
-    fold_purge_infos/5,
-    count_changes_since/2,
-
-    start_compaction/1,
-    finish_compaction/2,
-    trigger_on_compact/1
-]).
-
-
-exists(Engine, DbPath) ->
-    Engine:exists(DbPath).
-
-
-delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) ->
-    Engine:delete(RootDir, DbPath, DelOpts).
-
-
-delete_compaction_files(Engine, RootDir, DbPath, DelOpts)
-        when is_list(DelOpts) ->
-    Engine:delete_compaction_files(RootDir, DbPath, DelOpts).
-
-
-init(Engine, DbPath, Options) ->
-    case Engine:init(DbPath, Options) of
-         {ok, EngineState} ->
-             {ok, {Engine, EngineState}};
-         Error ->
-             throw(Error)
-    end.
-
-
-terminate(Reason, #db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:terminate(Reason, EngineState).
-
-
-handle_db_updater_call(Msg, _From, #db{} = Db) ->
-    #db{
-        engine = {Engine, EngineState}
-    } = Db,
-    case Engine:handle_db_updater_call(Msg, EngineState) of
-        {reply, Resp, NewState} ->
-            {reply, Resp, Db#db{engine = {Engine, NewState}}};
-        {stop, Reason, Resp, NewState} ->
-            {stop, Reason, Resp, Db#db{engine = {Engine, NewState}}}
-    end.
-
-
-handle_db_updater_info(Msg, #db{} = Db) ->
-    #db{
-        name = Name,
-        engine = {Engine, EngineState}
-    } = Db,
-    case Engine:handle_db_updater_info(Msg, EngineState) of
-        {noreply, NewState} ->
-            {noreply, Db#db{engine = {Engine, NewState}}};
-        {noreply, NewState, Timeout} ->
-            {noreply, Db#db{engine = {Engine, NewState}}, Timeout};
-        {stop, Reason, NewState} ->
-            couch_log:error("DB ~s shutting down: ~p", [Name, Msg]),
-            {stop, Reason, Db#db{engine = {Engine, NewState}}}
-    end.
-
-
-incref(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewState} = Engine:incref(EngineState),
-    {ok, Db#db{engine = {Engine, NewState}}}.
-
-
-decref(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:decref(EngineState).
-
-
-monitored_by(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:monitored_by(EngineState).
-
-
-last_activity(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:last_activity(EngineState).
-
-
-get_engine(#db{} = Db) ->
-    #db{engine = {Engine, _}} = Db,
-    Engine.
-
-
-get_compacted_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_compacted_seq(EngineState).
-
-
-get_del_doc_count(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_del_doc_count(EngineState).
-
-
-get_disk_version(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_disk_version(EngineState).
-
-
-get_doc_count(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_doc_count(EngineState).
-
-
-get_epochs(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_epochs(EngineState).
-
-
-get_purge_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_purge_seq(EngineState).
-
-
-get_oldest_purge_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_oldest_purge_seq(EngineState).
-
-
-get_purge_infos_limit(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_purge_infos_limit(EngineState).
-
-
-get_revs_limit(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_revs_limit(EngineState).
-
-
-get_security(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_security(EngineState).
-
-
-get_props(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_props(EngineState).
-
-
-get_size_info(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_size_info(EngineState).
-
-
-get_partition_info(#db{} = Db, Partition) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_partition_info(EngineState, Partition).
-
-
-get_update_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_update_seq(EngineState).
-
-
-get_uuid(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_uuid(EngineState).
-
-
-set_revs_limit(#db{} = Db, RevsLimit) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_purge_infos_limit(#db{} = Db, PurgedDocsLimit) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_purge_infos_limit(EngineState, PurgedDocsLimit),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_security(#db{} = Db, SecProps) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_security(EngineState, SecProps),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_props(#db{} = Db, Props) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_props(EngineState, Props),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_update_seq(#db{} = Db, UpdateSeq) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_update_seq(EngineState, UpdateSeq),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_docs(#db{} = Db, DocIds) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_docs(EngineState, DocIds).
-
-
-open_local_docs(#db{} = Db, DocIds) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_local_docs(EngineState, DocIds).
-
-
-read_doc_body(#db{} = Db, RawDoc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:read_doc_body(EngineState, RawDoc).
-
-
-load_purge_infos(#db{} = Db, UUIDs) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:load_purge_infos(EngineState, UUIDs).
-
-
-serialize_doc(#db{} = Db, #doc{} = Doc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:serialize_doc(EngineState, Doc).
-
-
-write_doc_body(#db{} = Db, #doc{} = Doc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:write_doc_body(EngineState, Doc).
-
-
-write_doc_infos(#db{} = Db, DocUpdates, LocalDocs) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:write_doc_infos(EngineState, DocUpdates, LocalDocs),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-purge_docs(#db{} = Db, DocUpdates, Purges) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:purge_docs(
-        EngineState, DocUpdates, Purges),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-copy_purge_infos(#db{} = Db, Purges) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:copy_purge_infos(
-        EngineState, Purges),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-commit_data(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:commit_data(EngineState),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_write_stream(#db{} = Db, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_write_stream(EngineState, Options).
-
-
-open_read_stream(#db{} = Db, StreamDiskInfo) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_read_stream(EngineState, StreamDiskInfo).
-
-
-is_active_stream(#db{} = Db, ReadStreamState) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:is_active_stream(EngineState, ReadStreamState).
-
-
-fold_docs(#db{} = Db, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options).
-
-
-fold_purge_infos(#db{} = Db, StartPurgeSeq, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_purge_infos(
-            EngineState, StartPurgeSeq, UserFun, UserAcc, Options).
-
-
-count_changes_since(#db{} = Db, StartSeq) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:count_changes_since(EngineState, StartSeq).
-
-
-start_compaction(#db{} = Db) ->
-    #db{
-        engine = {Engine, EngineState},
-        name = DbName,
-        options = Options
-    } = Db,
-    {ok, NewEngineState, Pid} = Engine:start_compaction(
-            EngineState, DbName, Options, self()),
-    {ok, Db#db{
-        engine = {Engine, NewEngineState},
-        compactor_pid = Pid
-    }}.
-
-
-finish_compaction(Db, CompactInfo) ->
-    #db{
-        engine = {Engine, St},
-        name = DbName,
-        options = Options
-    } = Db,
-    NewDb = case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
-        {ok, NewState, undefined} ->
-            couch_event:notify(DbName, compacted),
-            Db#db{
-                engine = {Engine, NewState},
-                compactor_pid = nil
-            };
-        {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
-            Db#db{
-                engine = {Engine, NewState},
-                compactor_pid = CompactorPid
-            }
-    end,
-    ok = gen_server:call(couch_server, {db_updated, NewDb}, infinity),
-    {ok, NewDb}.
-
-
-trigger_on_compact(DbName) ->
-    {ok, DDocs} = get_ddocs(DbName),
-    couch_db_plugin:on_compact(DbName, DDocs).
-
-
-get_ddocs(<<"shards/", _/binary>> = DbName) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(fabric:design_docs(mem3:dbname(DbName)))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, JsonDDocs}} ->
-            {ok, lists:map(fun(JsonDDoc) ->
-                couch_doc:from_json_obj(JsonDDoc)
-            end, JsonDDocs)};
-        {'DOWN', Ref, _, _, Else} ->
-            Else
-    end;
-get_ddocs(DbName) ->
-    couch_util:with_db(DbName, fun(Db) ->
-        FoldFun = fun(FDI, Acc) ->
-            {ok, Doc} = couch_db:open_doc_int(Db, FDI, []),
-            {ok, [Doc | Acc]}
-        end,
-        {ok, Docs} = couch_db:fold_design_docs(Db, FoldFun, [], []),
-        {ok, lists:reverse(Docs)}
-    end).
diff --git a/src/couch/src/couch_db_epi.erl b/src/couch/src/couch_db_epi.erl
index 21879f6..bfd435a 100644
--- a/src/couch/src/couch_db_epi.erl
+++ b/src/couch/src/couch_db_epi.erl
@@ -35,7 +35,6 @@ providers() ->
 
 services() ->
     [
-        {couch_db, couch_db_plugin},
         {feature_flags, couch_flags}
     ].
 
diff --git a/src/couch/src/couch_db_header.erl b/src/couch/src/couch_db_header.erl
deleted file mode 100644
index 355364f..0000000
--- a/src/couch/src/couch_db_header.erl
+++ /dev/null
@@ -1,405 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_header).
-
-
--export([
-    new/0,
-    from/1,
-    is_header/1,
-    upgrade/1,
-    set/2
-]).
-
--export([
-    disk_version/1,
-    update_seq/1,
-    id_tree_state/1,
-    seq_tree_state/1,
-    latest/1,
-    local_tree_state/1,
-    purge_seq/1,
-    purged_docs/1,
-    security_ptr/1,
-    revs_limit/1,
-    uuid/1,
-    epochs/1,
-    compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 6).
-
--record(db_header, {
-    disk_version = ?LATEST_DISK_VERSION,
-    update_seq = 0,
-    unused = 0,
-    id_tree_state = nil,
-    seq_tree_state = nil,
-    local_tree_state = nil,
-    purge_seq = 0,
-    purged_docs = nil,
-    security_ptr = nil,
-    revs_limit = 1000,
-    uuid,
-    epochs,
-    compacted_seq
-}).
-
-
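As a hedged illustration of the versioning rule above, a field appended to
the end of #db_header{} with an inline default would not require bumping
?LATEST_DISK_VERSION, because headers read from older files simply pick up
the default when upgraded; the field name below is made up.

    %% -record(db_header, {
    %%     ...,                       % existing fields unchanged
    %%     compacted_seq,
    %%     new_hint = undefined       % hypothetical appended field, inline default
    %% }).
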
-new() ->
-    #db_header{
-        uuid = couch_uuids:random(),
-        epochs = [{node(), 0}]
-    }.
-
-
-from(Header0) ->
-    Header = upgrade(Header0),
-    #db_header{
-        uuid = Header#db_header.uuid,
-        epochs = Header#db_header.epochs,
-        compacted_seq = Header#db_header.compacted_seq
-    }.
-
-
-is_header(Header) ->
-    try
-        upgrade(Header),
-        true
-    catch _:_ ->
-        false
-    end.
-
-
-upgrade(Header) ->
-    Funs = [
-        fun upgrade_tuple/1,
-        fun upgrade_disk_version/1,
-        fun upgrade_uuid/1,
-        fun upgrade_epochs/1,
-        fun upgrade_compacted_seq/1
-    ],
-    lists:foldl(fun(F, HdrAcc) ->
-        F(HdrAcc)
-    end, Header, Funs).
-
-
-set(Header0, Fields) ->
-    % A subtlety here is that if a database was open during
-    % the release upgrade that added uuids and epochs, then
-    % this dynamic upgrade also assigns a uuid and epoch.
-    Header = upgrade(Header0),
-    lists:foldl(fun({Field, Value}, HdrAcc) ->
-        set_field(HdrAcc, Field, Value)
-    end, Header, Fields).
-
-
-disk_version(Header) ->
-    get_field(Header, disk_version).
-
-
-update_seq(Header) ->
-    get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
-    get_field(Header, id_tree_state).
-
-
... 71565 lines suppressed ...