Posted to commits@couchdb.apache.org by va...@apache.org on 2021/04/16 21:45:06 UTC

[couchdb] 01/24: Delete non-functional 3.x applications and modules from main

This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f9f7f216f38569320558e8c2268d066c772c1e3f
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Tue Apr 13 19:12:12 2021 -0400

    Delete non-functional 3.x applications and modules from main
    
    Affected applications fall in a few categories:
    
    1. Applications which are not used any more:
    
      * rexi
      * mem3
      * ddoc_cache
      * smoosh
      * ken
      * ioq
      * dreyfus
      * couch_event
      * snappy
      * khash
      * couch_plugins
    
    2. Applications we intend to keep for 4.x, but which in their current form
    rely on the 3.x architecture (mem3, couch_file, clustering). When these
    applications are ready to be re-implemented, they should be copied from the
    3.x branch, where they will be more up to date:
    
      * couch_peruser
      * global_changes
      * setup
    
    3. Applications used only for utility functions; those functions will be
    moved to other applications:
    
      * couch_mrview
      * couch_index
    
    4. Applications where some modules have been removed and some stayed:
    
      * couch:
        - removed: couch_db_*, couch_btree, couch_file, couch_lru, etc.
    
        - kept: couch_doc, couch_key_tree and some others
    
        - couch_server: stripped down to only the functions which are still
          used: serving UUIDs, returning the CouchDB version and hashing
          passwords (a rough sketch of this remaining surface follows at the
          end of this message). It's a candidate for a follow-up PR that splits
          that functionality into other modules (couch_passwords,
          fabric2_server).
    
      * fabric:
        - removed: all fabric_rpc modules
    
        - kept: fabric2_* modules
    
      * mango:
        - removed clouseau pings and check for "text" service
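
    For illustration only, here is a minimal, hypothetical sketch of what the
    stripped-down couch_server surface described above could look like. The
    module name, function names and the calls into couch_uuids/couch_passwords
    are assumptions made for this example, not the exact code kept in the tree:

        %% Hypothetical sketch -- not the actual couch_server module.
        -module(couch_server_sketch).
        -export([get_version/0, new_uuid/0, hash_admin_password/1]).

        %% Return a CouchDB version string (placeholder value).
        get_version() ->
            "4.x-sketch".

        %% Serve a fresh UUID by delegating to couch_uuids.
        new_uuid() ->
            couch_uuids:random().

        %% Delegate password hashing to couch_passwords, one of the modules
        %% named above as a target for this functionality.
        hash_admin_password(Password) ->
            couch_passwords:hash_admin_password(Password).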
---
 emilio.config                                      |    4 +-
 mix.exs                                            |    5 +-
 rebar.config.script                                |   17 -
 rel/apps/couch_epi.config                          |    7 +-
 rel/reltool.config                                 |   32 -
 src/chttpd/src/chttpd_rewrite.erl                  |  487 -----
 src/couch/src/couch.app.src                        |    7 +-
 src/couch/src/couch_bt_engine.erl                  | 1246 ------------
 src/couch/src/couch_bt_engine.hrl                  |   27 -
 src/couch/src/couch_bt_engine_compactor.erl        |  590 ------
 src/couch/src/couch_bt_engine_header.erl           |  451 -----
 src/couch/src/couch_bt_engine_stream.erl           |   70 -
 src/couch/src/couch_btree.erl                      |  855 --------
 src/couch/src/couch_changes.erl                    |  724 -------
 src/couch/src/couch_compress.erl                   |   99 -
 src/couch/src/couch_db.erl                         | 2086 --------------------
 src/couch/src/couch_db_engine.erl                  | 1105 -----------
 src/couch/src/couch_db_header.erl                  |  405 ----
 src/couch/src/couch_db_int.hrl                     |   76 -
 src/couch/src/couch_db_plugin.erl                  |   96 -
 src/couch/src/couch_db_split.erl                   |  503 -----
 src/couch/src/couch_db_updater.erl                 |  955 ---------
 src/couch/src/couch_emsort.erl                     |  318 ---
 src/couch/src/couch_event_sup.erl                  |   74 -
 src/couch/src/couch_file.erl                       |  804 --------
 src/couch/src/couch_httpd_db.erl                   | 1263 ------------
 src/couch/src/couch_httpd_misc_handlers.erl        |  269 ---
 src/couch/src/couch_httpd_rewrite.erl              |  484 -----
 src/couch/src/couch_lru.erl                        |   67 -
 src/couch/src/couch_multidb_changes.erl            |  903 ---------
 src/couch/src/couch_server_int.hrl                 |   23 -
 src/couch/src/couch_stream.erl                     |  322 ---
 src/couch/src/couch_task_status.erl                |  171 --
 src/couch/src/couch_users_db.erl                   |  137 --
 src/couch/test/eunit/couch_auth_cache_tests.erl    |  349 ----
 .../test/eunit/couch_bt_engine_compactor_tests.erl |  129 --
 src/couch/test/eunit/couch_bt_engine_tests.erl     |   20 -
 .../test/eunit/couch_bt_engine_upgrade_tests.erl   |  244 ---
 src/couch/test/eunit/couch_btree_tests.erl         |  572 ------
 src/couch/test/eunit/couch_changes_tests.erl       |  962 ---------
 src/couch/test/eunit/couch_db_doc_tests.erl        |  121 --
 src/couch/test/eunit/couch_db_plugin_tests.erl     |  205 --
 .../test/eunit/couch_db_props_upgrade_tests.erl    |   83 -
 src/couch/test/eunit/couch_db_split_tests.erl      |  331 ----
 src/couch/test/eunit/couch_db_tests.erl            |  198 --
 src/couch/test/eunit/couch_file_tests.erl          |  551 ------
 src/couch/test/eunit/couch_index_tests.erl         |  232 ---
 src/couch/test/eunit/couch_server_tests.erl        |  294 ---
 src/couch/test/eunit/couch_stream_tests.erl        |  124 --
 src/couch/test/eunit/couch_task_status_tests.erl   |  233 ---
 src/couch/test/eunit/couchdb_attachments_tests.erl |  765 -------
 src/couch/test/eunit/couchdb_db_tests.erl          |   91 -
 src/couch/test/eunit/couchdb_design_doc_tests.erl  |   87 -
 .../test/eunit/couchdb_file_compression_tests.erl  |  250 ---
 .../test/eunit/couchdb_location_header_tests.erl   |   78 -
 src/couch/test/eunit/couchdb_mrview_tests.erl      |  261 ---
 .../test/eunit/couchdb_update_conflicts_tests.erl  |  280 ---
 src/couch/test/eunit/couchdb_vhosts_tests.erl      |  271 ---
 src/couch/test/eunit/couchdb_views_tests.erl       |  668 -------
 .../test/eunit/fixtures/os_daemon_configer.escript |    3 +-
 src/couch/test/eunit/global_changes_tests.erl      |  159 --
 src/couch/test/exunit/couch_compress_tests.exs     |  113 --
 src/couch/test/exunit/fabric_test.exs              |  101 -
 src/couch_event/.gitignore                         |    2 -
 src/couch_event/LICENSE                            |  202 --
 src/couch_event/README.md                          |    3 -
 src/couch_event/rebar.config                       |    1 -
 src/couch_event/src/couch_event.app.src            |   22 -
 src/couch_event/src/couch_event.erl                |   65 -
 src/couch_event/src/couch_event_app.erl            |   27 -
 src/couch_event/src/couch_event_int.hrl            |   19 -
 src/couch_event/src/couch_event_listener.erl       |  238 ---
 src/couch_event/src/couch_event_listener_mfa.erl   |  107 -
 src/couch_event/src/couch_event_os_listener.erl    |   76 -
 src/couch_event/src/couch_event_server.erl         |  156 --
 src/couch_event/src/couch_event_sup2.erl           |   44 -
 src/couch_index/.gitignore                         |    3 -
 src/couch_index/LICENSE                            |  202 --
 src/couch_index/rebar.config                       |    2 -
 src/couch_index/src/couch_index.app.src            |   19 -
 src/couch_index/src/couch_index.erl                |  639 ------
 src/couch_index/src/couch_index_app.erl            |   21 -
 src/couch_index/src/couch_index_compactor.erl      |  135 --
 src/couch_index/src/couch_index_epi.erl            |   50 -
 src/couch_index/src/couch_index_plugin.erl         |   51 -
 .../src/couch_index_plugin_couch_db.erl            |   26 -
 src/couch_index/src/couch_index_server.erl         |  303 ---
 src/couch_index/src/couch_index_sup.erl            |   24 -
 src/couch_index/src/couch_index_updater.erl        |  239 ---
 src/couch_index/src/couch_index_util.erl           |   78 -
 .../test/eunit/couch_index_compaction_tests.erl    |  117 --
 .../test/eunit/couch_index_ddoc_updated_tests.erl  |  145 --
 src/couch_js/src/couch_js.app.src                  |    3 +-
 src/couch_mrview/LICENSE                           |  202 --
 src/couch_mrview/include/couch_mrview.hrl          |  114 --
 src/couch_mrview/priv/stats_descriptions.cfg       |   24 -
 src/couch_mrview/rebar.config                      |    2 -
 src/couch_mrview/src/couch_mrview.app.src          |   18 -
 src/couch_mrview/src/couch_mrview.erl              |  692 -------
 src/couch_mrview/src/couch_mrview_cleanup.erl      |   59 -
 src/couch_mrview/src/couch_mrview_compactor.erl    |  294 ---
 src/couch_mrview/src/couch_mrview_http.erl         |  650 ------
 src/couch_mrview/src/couch_mrview_index.erl        |  329 ---
 src/couch_mrview/src/couch_mrview_show.erl         |  468 -----
 src/couch_mrview/src/couch_mrview_test_util.erl    |  123 --
 .../src/couch_mrview_update_notifier.erl           |   49 -
 src/couch_mrview/src/couch_mrview_updater.erl      |  373 ----
 src/couch_mrview/src/couch_mrview_util.erl         | 1180 -----------
 .../test/eunit/couch_mrview_all_docs_tests.erl     |  140 --
 .../test/eunit/couch_mrview_collation_tests.erl    |  207 --
 .../test/eunit/couch_mrview_compact_tests.erl      |  115 --
 .../test/eunit/couch_mrview_ddoc_updated_tests.erl |  145 --
 .../eunit/couch_mrview_ddoc_validation_tests.erl   |  422 ----
 .../test/eunit/couch_mrview_design_docs_tests.erl  |  136 --
 .../test/eunit/couch_mrview_http_tests.erl         |   28 -
 .../test/eunit/couch_mrview_index_info_tests.erl   |  111 --
 .../test/eunit/couch_mrview_local_docs_tests.erl   |  148 --
 .../test/eunit/couch_mrview_map_views_tests.erl    |  144 --
 .../eunit/couch_mrview_purge_docs_fabric_tests.erl |  286 ---
 .../test/eunit/couch_mrview_purge_docs_tests.erl   |  575 ------
 .../test/eunit/couch_mrview_red_views_tests.erl    |   95 -
 .../test/eunit/couch_mrview_util_tests.erl         |   39 -
 src/couch_peruser/.gitignore                       |    9 -
 src/couch_peruser/LICENSE                          |  202 --
 src/couch_peruser/README.md                        |   34 -
 src/couch_peruser/src/couch_peruser.app.src        |   20 -
 src/couch_peruser/src/couch_peruser.erl            |  423 ----
 src/couch_peruser/src/couch_peruser_app.erl        |   26 -
 src/couch_peruser/src/couch_peruser_sup.erl        |   29 -
 .../test/eunit/couch_peruser_test.erl              |  538 -----
 src/couch_plugins/LICENSE                          |  202 --
 src/couch_plugins/Makefile.am                      |   40 -
 src/couch_plugins/README.md                        |  159 --
 src/couch_plugins/src/couch_plugins.app.src        |   22 -
 src/couch_plugins/src/couch_plugins.erl            |  304 ---
 src/couch_plugins/src/couch_plugins_httpd.erl      |   65 -
 src/couch_pse_tests/src/couch_pse_tests.app.src    |   20 -
 src/couch_pse_tests/src/cpse_gather.erl            |   95 -
 src/couch_pse_tests/src/cpse_test_attachments.erl  |   99 -
 src/couch_pse_tests/src/cpse_test_compaction.erl   |  318 ---
 .../src/cpse_test_copy_purge_infos.erl             |   82 -
 src/couch_pse_tests/src/cpse_test_fold_changes.erl |  185 --
 src/couch_pse_tests/src/cpse_test_fold_docs.erl    |  400 ----
 .../src/cpse_test_fold_purge_infos.erl             |  167 --
 .../src/cpse_test_get_set_props.erl                |   95 -
 .../src/cpse_test_open_close_delete.erl            |   77 -
 .../src/cpse_test_purge_bad_checkpoints.erl        |   80 -
 src/couch_pse_tests/src/cpse_test_purge_docs.erl   |  464 -----
 .../src/cpse_test_purge_replication.erl            |  215 --
 src/couch_pse_tests/src/cpse_test_purge_seqs.erl   |  129 --
 .../src/cpse_test_read_write_docs.erl              |  311 ---
 src/couch_pse_tests/src/cpse_test_ref_counting.erl |  113 --
 src/couch_pse_tests/src/cpse_util.erl              |  677 -------
 src/ddoc_cache/LICENSE                             |  202 --
 src/ddoc_cache/README.md                           |    4 -
 src/ddoc_cache/priv/stats_descriptions.cfg         |   12 -
 src/ddoc_cache/src/ddoc_cache.app.src              |   32 -
 src/ddoc_cache/src/ddoc_cache.erl                  |   60 -
 src/ddoc_cache/src/ddoc_cache.hrl                  |   40 -
 src/ddoc_cache/src/ddoc_cache_app.erl              |   25 -
 src/ddoc_cache/src/ddoc_cache_entry.erl            |  374 ----
 src/ddoc_cache/src/ddoc_cache_entry_custom.erl     |   37 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     |   46 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl |   47 -
 .../src/ddoc_cache_entry_validation_funs.erl       |   44 -
 src/ddoc_cache/src/ddoc_cache_lru.erl              |  333 ----
 src/ddoc_cache/src/ddoc_cache_opener.erl           |   66 -
 src/ddoc_cache/src/ddoc_cache_sup.erl              |   46 -
 src/ddoc_cache/src/ddoc_cache_value.erl            |   27 -
 .../test/eunit/ddoc_cache_basic_test.erl           |  175 --
 .../test/eunit/ddoc_cache_coverage_test.erl        |   77 -
 .../test/eunit/ddoc_cache_disabled_test.erl        |   62 -
 .../test/eunit/ddoc_cache_entry_test.erl           |  159 --
 src/ddoc_cache/test/eunit/ddoc_cache_ev.erl        |   21 -
 .../test/eunit/ddoc_cache_eviction_test.erl        |   96 -
 src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl  |  219 --
 .../test/eunit/ddoc_cache_no_cache_test.erl        |   87 -
 .../test/eunit/ddoc_cache_open_error_test.erl      |   46 -
 src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl |  107 -
 .../test/eunit/ddoc_cache_opener_test.erl          |   33 -
 .../test/eunit/ddoc_cache_refresh_test.erl         |  174 --
 .../test/eunit/ddoc_cache_remove_test.erl          |  224 ---
 src/ddoc_cache/test/eunit/ddoc_cache_test.hrl      |   26 -
 src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl     |  111 --
 src/dreyfus/.gitignore                             |    4 -
 src/dreyfus/LICENSE.txt                            |  202 --
 src/dreyfus/README.md                              |   78 -
 src/dreyfus/include/dreyfus.hrl                    |   74 -
 src/dreyfus/priv/stats_descriptions.cfg            |   65 -
 src/dreyfus/src/clouseau_rpc.erl                   |  109 -
 src/dreyfus/src/dreyfus.app.src                    |   22 -
 src/dreyfus/src/dreyfus_app.erl                    |   24 -
 src/dreyfus/src/dreyfus_bookmark.erl               |   90 -
 src/dreyfus/src/dreyfus_config.erl                 |   15 -
 src/dreyfus/src/dreyfus_epi.erl                    |   46 -
 src/dreyfus/src/dreyfus_fabric.erl                 |  205 --
 src/dreyfus/src/dreyfus_fabric_cleanup.erl         |   78 -
 src/dreyfus/src/dreyfus_fabric_group1.erl          |  129 --
 src/dreyfus/src/dreyfus_fabric_group2.erl          |  158 --
 src/dreyfus/src/dreyfus_fabric_info.erl            |  108 -
 src/dreyfus/src/dreyfus_fabric_search.erl          |  270 ---
 src/dreyfus/src/dreyfus_httpd.erl                  |  614 ------
 src/dreyfus/src/dreyfus_httpd_handlers.erl         |   29 -
 src/dreyfus/src/dreyfus_index.erl                  |  391 ----
 src/dreyfus/src/dreyfus_index_manager.erl          |  153 --
 src/dreyfus/src/dreyfus_index_updater.erl          |  181 --
 src/dreyfus/src/dreyfus_plugin_couch_db.erl        |   26 -
 src/dreyfus/src/dreyfus_rpc.erl                    |  130 --
 src/dreyfus/src/dreyfus_sup.erl                    |   32 -
 src/dreyfus/src/dreyfus_util.erl                   |  441 -----
 src/dreyfus/test/dreyfus_blacklist_await_test.erl  |   76 -
 .../test/dreyfus_blacklist_request_test.erl        |   96 -
 src/dreyfus/test/dreyfus_config_test.erl           |   71 -
 src/dreyfus/test/dreyfus_purge_test.erl            |  867 --------
 src/dreyfus/test/dreyfus_test_util.erl             |   13 -
 src/dreyfus/test/elixir/mix.exs                    |   30 -
 src/dreyfus/test/elixir/mix.lock                   |    5 -
 src/dreyfus/test/elixir/run                        |    4 -
 .../test/elixir/test/partition_search_test.exs     |  247 ---
 src/dreyfus/test/elixir/test/search_test.exs       |  226 ---
 src/dreyfus/test/elixir/test/test_helper.exs       |    4 -
 src/fabric/include/fabric.hrl                      |   46 -
 src/fabric/src/fabric.app.src                      |    2 -
 src/fabric/src/fabric.erl                          |  720 -------
 src/fabric/src/fabric_db_create.erl                |  228 ---
 src/fabric/src/fabric_db_delete.erl                |   98 -
 src/fabric/src/fabric_db_doc_count.erl             |   62 -
 src/fabric/src/fabric_db_info.erl                  |  171 --
 src/fabric/src/fabric_db_meta.erl                  |  198 --
 src/fabric/src/fabric_db_partition_info.erl        |  155 --
 src/fabric/src/fabric_db_update_listener.erl       |  177 --
 src/fabric/src/fabric_design_doc_count.erl         |   62 -
 src/fabric/src/fabric_dict.erl                     |   61 -
 src/fabric/src/fabric_doc_attachments.erl          |  160 --
 src/fabric/src/fabric_doc_atts.erl                 |  170 --
 src/fabric/src/fabric_doc_missing_revs.erl         |   97 -
 src/fabric/src/fabric_doc_open.erl                 |  610 ------
 src/fabric/src/fabric_doc_open_revs.erl            |  799 --------
 src/fabric/src/fabric_doc_purge.erl                |  571 ------
 src/fabric/src/fabric_doc_update.erl               |  377 ----
 src/fabric/src/fabric_group_info.erl               |  139 --
 src/fabric/src/fabric_ring.erl                     |  519 -----
 src/fabric/src/fabric_rpc.erl                      |  664 -------
 src/fabric/src/fabric_streams.erl                  |  274 ---
 src/fabric/src/fabric_util.erl                     |  347 ----
 src/fabric/src/fabric_view.erl                     |  478 -----
 src/fabric/src/fabric_view_all_docs.erl            |  332 ----
 src/fabric/src/fabric_view_changes.erl             |  820 --------
 src/fabric/src/fabric_view_map.erl                 |  267 ---
 src/fabric/src/fabric_view_reduce.erl              |  165 --
 src/fabric/test/eunit/fabric_rpc_tests.erl         |  181 --
 src/global_changes/.gitignore                      |    2 -
 src/global_changes/LICENSE                         |  203 --
 src/global_changes/README.md                       |   27 -
 src/global_changes/priv/stats_descriptions.cfg     |   20 -
 src/global_changes/src/global_changes.app.src      |   32 -
 src/global_changes/src/global_changes_app.erl      |   28 -
 src/global_changes/src/global_changes_epi.erl      |   51 -
 src/global_changes/src/global_changes_httpd.erl    |  285 ---
 .../src/global_changes_httpd_handlers.erl          |   28 -
 src/global_changes/src/global_changes_listener.erl |  165 --
 src/global_changes/src/global_changes_plugin.erl   |   40 -
 src/global_changes/src/global_changes_server.erl   |  229 ---
 src/global_changes/src/global_changes_sup.erl      |   84 -
 src/global_changes/src/global_changes_util.erl     |   27 -
 .../test/eunit/global_changes_hooks_tests.erl      |  156 --
 src/ioq/.gitignore                                 |    2 -
 src/ioq/src/ioq.app.src                            |   21 -
 src/ioq/src/ioq.erl                                |  189 --
 src/ioq/src/ioq_app.erl                            |   21 -
 src/ioq/src/ioq_sup.erl                            |   24 -
 src/ken/README.md                                  |   12 -
 src/ken/rebar.config.script                        |   28 -
 src/ken/src/ken.app.src.script                     |   38 -
 src/ken/src/ken.erl                                |   29 -
 src/ken/src/ken_app.erl                            |   28 -
 src/ken/src/ken_event_handler.erl                  |   56 -
 src/ken/src/ken_server.erl                         |  579 ------
 src/ken/src/ken_sup.erl                            |   33 -
 src/ken/test/config.ini                            |    2 -
 src/ken/test/ken_server_test.erl                   |   97 -
 src/mango/src/mango_cursor_text.erl                |  334 ----
 src/mango/src/mango_idx_text.erl                   |  459 -----
 src/mem3/LICENSE                                   |  202 --
 src/mem3/README.md                                 |   43 -
 src/mem3/README_reshard.md                         |   93 -
 src/mem3/include/mem3.hrl                          |   59 -
 src/mem3/priv/stats_descriptions.cfg               |   12 -
 src/mem3/rebar.config.script                       |   22 -
 src/mem3/src/mem3.app.src                          |   40 -
 src/mem3/src/mem3.erl                              |  424 ----
 src/mem3/src/mem3_app.erl                          |   21 -
 src/mem3/src/mem3_cluster.erl                      |  161 --
 src/mem3/src/mem3_epi.erl                          |   51 -
 src/mem3/src/mem3_hash.erl                         |   73 -
 src/mem3/src/mem3_httpd.erl                        |   84 -
 src/mem3/src/mem3_httpd_handlers.erl               |   61 -
 src/mem3/src/mem3_nodes.erl                        |  155 --
 src/mem3/src/mem3_plugin_couch_db.erl              |   21 -
 src/mem3/src/mem3_rep.erl                          |  998 ----------
 src/mem3/src/mem3_reshard.erl                      |  913 ---------
 src/mem3/src/mem3_reshard.hrl                      |   74 -
 src/mem3/src/mem3_reshard_api.erl                  |  217 --
 src/mem3/src/mem3_reshard_dbdoc.erl                |  274 ---
 src/mem3/src/mem3_reshard_httpd.erl                |  317 ---
 src/mem3/src/mem3_reshard_index.erl                |  164 --
 src/mem3/src/mem3_reshard_job.erl                  |  716 -------
 src/mem3/src/mem3_reshard_job_sup.erl              |   55 -
 src/mem3/src/mem3_reshard_store.erl                |  286 ---
 src/mem3/src/mem3_reshard_sup.erl                  |   47 -
 src/mem3/src/mem3_reshard_validate.erl             |  126 --
 src/mem3/src/mem3_rpc.erl                          |  711 -------
 src/mem3/src/mem3_seeds.erl                        |  162 --
 src/mem3/src/mem3_shards.erl                       |  766 -------
 src/mem3/src/mem3_sup.erl                          |   40 -
 src/mem3/src/mem3_sync.erl                         |  323 ---
 src/mem3/src/mem3_sync_event.erl                   |   86 -
 src/mem3/src/mem3_sync_event_listener.erl          |  353 ----
 src/mem3/src/mem3_sync_nodes.erl                   |  115 --
 src/mem3/src/mem3_sync_security.erl                |  117 --
 src/mem3/src/mem3_util.erl                         |  650 ------
 src/mem3/test/eunit/mem3_cluster_test.erl          |  133 --
 src/mem3/test/eunit/mem3_hash_test.erl             |   23 -
 src/mem3/test/eunit/mem3_rep_test.erl              |  321 ---
 src/mem3/test/eunit/mem3_reshard_api_test.erl      |  847 --------
 .../test/eunit/mem3_reshard_changes_feed_test.erl  |  389 ----
 src/mem3/test/eunit/mem3_reshard_test.erl          |  834 --------
 src/mem3/test/eunit/mem3_ring_prop_tests.erl       |  151 --
 src/mem3/test/eunit/mem3_seeds_test.erl            |   69 -
 src/mem3/test/eunit/mem3_sync_security_test.erl    |   54 -
 src/mem3/test/eunit/mem3_util_test.erl             |  130 --
 src/rexi/README.md                                 |   23 -
 src/rexi/include/rexi.hrl                          |   20 -
 src/rexi/priv/stats_descriptions.cfg               |   24 -
 src/rexi/rebar.config                              |    2 -
 src/rexi/src/rexi.app.src                          |   28 -
 src/rexi/src/rexi.erl                              |  320 ---
 src/rexi/src/rexi_app.erl                          |   22 -
 src/rexi/src/rexi_buffer.erl                       |  104 -
 src/rexi/src/rexi_monitor.erl                      |   65 -
 src/rexi/src/rexi_server.erl                       |  193 --
 src/rexi/src/rexi_server_mon.erl                   |  176 --
 src/rexi/src/rexi_server_sup.erl                   |   29 -
 src/rexi/src/rexi_sup.erl                          |   64 -
 src/rexi/src/rexi_utils.erl                        |  105 -
 src/setup/.gitignore                               |    4 -
 src/setup/LICENSE                                  |  203 --
 src/setup/README.md                                |  210 --
 src/setup/src/setup.app.src                        |   27 -
 src/setup/src/setup.erl                            |  386 ----
 src/setup/src/setup_app.erl                        |   28 -
 src/setup/src/setup_epi.erl                        |   49 -
 src/setup/src/setup_httpd.erl                      |  180 --
 src/setup/src/setup_httpd_handlers.erl             |   32 -
 src/setup/src/setup_sup.erl                        |   44 -
 src/setup/test/t-frontend-setup.sh                 |   71 -
 src/setup/test/t-single-node-auto-setup.sh         |   24 -
 src/setup/test/t-single-node.sh                    |   46 -
 src/setup/test/t.sh                                |   63 -
 src/smoosh/README.md                               |  140 --
 src/smoosh/operator_guide.md                       |  396 ----
 src/smoosh/src/smoosh.app.src                      |   29 -
 src/smoosh/src/smoosh.erl                          |   69 -
 src/smoosh/src/smoosh_app.erl                      |   28 -
 src/smoosh/src/smoosh_channel.erl                  |  325 ---
 src/smoosh/src/smoosh_priority_queue.erl           |   86 -
 src/smoosh/src/smoosh_server.erl                   |  606 ------
 src/smoosh/src/smoosh_sup.erl                      |   38 -
 src/smoosh/src/smoosh_utils.erl                    |   92 -
 src/smoosh/test/exunit/scheduling_window_test.exs  |   79 -
 src/smoosh/test/exunit/test_helper.exs             |    2 -
 371 files changed, 6 insertions(+), 75257 deletions(-)

diff --git a/emilio.config b/emilio.config
index 0dad938..84a6571 100644
--- a/emilio.config
+++ b/emilio.config
@@ -8,13 +8,11 @@
     "src[\/]emilio[\/]*",
     "src[\/]folsom[\/]*",
     "src[\/]mochiweb[\/]*",
-    "src[\/]snappy[\/]*",
     "src[\/]ssl_verify_fun[\/]*",
     "src[\/]ibrowse[\/]*",
     "src[\/]jiffy[\/]*",
     "src[\/]meck[\/]*",
     "src[\/]proper[\/]*",
     "src[\/]recon[\/]*",
-    "src[\/]hyper[\/]*",
-    "src[\/]triq[\/]*"
+    "src[\/]hyper[\/]*"
 ]}.
diff --git a/mix.exs b/mix.exs
index 9cba1a4..12e0221 100644
--- a/mix.exs
+++ b/mix.exs
@@ -133,16 +133,13 @@ defmodule CouchDBTest.Mixfile do
       "b64url",
       "bear",
       "mochiweb",
-      "snappy",
       "rebar",
       "proper",
       "mochiweb",
       "meck",
-      "khash",
       "hyper",
       "fauxton",
-      "folsom",
-      "hqueue"
+      "folsom"
     ]
 
     deps |> Enum.map(fn app -> "src/#{app}" end)
diff --git a/rebar.config.script b/rebar.config.script
index e33a9e7..0f40cf0 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -118,33 +118,18 @@ SubDirs = [
     "src/chttpd",
     "src/couch",
     "src/couch_eval",
-    "src/couch_event",
-    "src/mem3",
-    "src/couch_index",
-    "src/couch_mrview",
     "src/couch_js",
     "src/couch_replicator",
-    "src/couch_plugins",
-    "src/couch_pse_tests",
     "src/couch_stats",
-    "src/couch_peruser",
     "src/couch_tests",
     "src/couch_views",
     "src/ctrace",
-    "src/ddoc_cache",
-    "src/dreyfus",
     "src/fabric",
     "src/aegis",
     "src/couch_jobs",
     "src/couch_expiring_cache",
-    "src/global_changes",
-    "src/ioq",
     "src/jwtf",
-    "src/ken",
     "src/mango",
-    "src/rexi",
-    "src/setup",
-    "src/smoosh",
     "src/ebtree",
     "src/couch_prometheus",
     "rel"
@@ -156,8 +141,6 @@ DepDescs = [
 {b64url,           "b64url",           {tag, "1.0.2"}},
 {erlfdb,           "erlfdb",           {tag, "v1.3.3"}},
 {ets_lru,          "ets-lru",          {tag, "1.1.0"}},
-{khash,            "khash",            {tag, "1.1.0"}},
-{snappy,           "snappy",           {tag, "CouchDB-1.0.4"}},
 
 %% Non-Erlang deps
 {docs,             {url, "https://github.com/apache/couchdb-documentation"},
diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config
index f9f49e1..db85ef1 100644
--- a/rel/apps/couch_epi.config
+++ b/rel/apps/couch_epi.config
@@ -14,12 +14,7 @@
     couch_db_epi,
     fabric2_epi,
     chttpd_epi,
-    couch_index_epi,
     couch_views_epi,
     couch_replicator_epi,
-    dreyfus_epi,
-    global_changes_epi,
-    mango_epi,
-    mem3_epi,
-    setup_epi
+    mango_epi
 ]}.
diff --git a/rel/reltool.config b/rel/reltool.config
index 9c59aa5..7d35993 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -35,42 +35,26 @@
         couch,
         couch_epi,
         couch_jobs,
-        couch_index,
         couch_log,
-        couch_mrview,
-        couch_plugins,
         couch_replicator,
         couch_stats,
         couch_eval,
         couch_js,
-        couch_event,
-        couch_peruser,
         couch_views,
-        ddoc_cache,
-        dreyfus,
         ebtree,
         erlfdb,
         ets_lru,
         fabric,
         folsom,
-        global_changes,
         hyper,
         ibrowse,
-        ioq,
         jaeger_passage,
         jiffy,
         jwtf,
-        ken,
-        khash,
         local,
         mango,
-        mem3,
         mochiweb,
         passage,
-        rexi,
-        setup,
-        smoosh,
-        snappy,
         thrift_protocol,
         couch_prometheus,
         %% extra
@@ -109,40 +93,24 @@
     {app, couch_eval, [{incl_cond, include}]},
     {app, couch_js, [{incl_cond, include}]},
     {app, couch_jobs, [{incl_cond, include}]},
-    {app, couch_index, [{incl_cond, include}]},
     {app, couch_log, [{incl_cond, include}]},
-    {app, couch_mrview, [{incl_cond, include}]},
-    {app, couch_plugins, [{incl_cond, include}]},
     {app, couch_replicator, [{incl_cond, include}]},
     {app, couch_stats, [{incl_cond, include}]},
-    {app, couch_event, [{incl_cond, include}]},
-    {app, couch_peruser, [{incl_cond, include}]},
     {app, couch_views, [{incl_cond, include}]},
-    {app, ddoc_cache, [{incl_cond, include}]},
-    {app, dreyfus, [{incl_cond, include}]},
     {app, erlfdb, [{incl_cond, include}]},
     {app, ebtree, [{incl_cond, include}]},
     {app, ets_lru, [{incl_cond, include}]},
     {app, fabric, [{incl_cond, include}]},
     {app, folsom, [{incl_cond, include}]},
-    {app, global_changes, [{incl_cond, include}]},
     {app, hyper, [{incl_cond, include}]},
     {app, ibrowse, [{incl_cond, include}]},
-    {app, ioq, [{incl_cond, include}]},
     {app, jaeger_passage, [{incl_cond, include}]},
     {app, jiffy, [{incl_cond, include}]},
     {app, jwtf, [{incl_cond, include}]},
-    {app, ken, [{incl_cond, include}]},
     {app, local, [{incl_cond, include}]},
-    {app, khash, [{incl_cond, include}]},
     {app, mango, [{incl_cond, include}]},
-    {app, mem3, [{incl_cond, include}]},
     {app, mochiweb, [{incl_cond, include}]},
     {app, passage, [{incl_cond, include}]},
-    {app, rexi, [{incl_cond, include}]},
-    {app, setup, [{incl_cond, include}]},
-    {app, smoosh, [{incl_cond, include}]},
-    {app, snappy, [{incl_cond, include}]},
     {app, thrift_protocol, [{incl_cond, include}]},
     {app, couch_prometheus, [{incl_cond, include}]},
 
diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl
deleted file mode 100644
index 1c2c1f3..0000000
--- a/src/chttpd/src/chttpd_rewrite.erl
+++ /dev/null
@@ -1,487 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(chttpd_rewrite).
-
--compile(tuple_calls).
-
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-handle_rewrite_req(#httpd{}=Req, Db, DDoc) ->
-    RewritesSoFar = erlang:get(?REWRITE_COUNT),
-    MaxRewrites = config:get_integer("httpd", "rewrite_limit", 100),
-    case RewritesSoFar >= MaxRewrites of
-        true ->
-            throw({bad_request, <<"Exceeded rewrite recursion limit">>});
-        false ->
-            erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
-    end,
-    case get_rules(DDoc) of
-        Rules when is_list(Rules) ->
-            do_rewrite(Req, Rules);
-        Rules when is_binary(Rules) ->
-            case couch_query_servers:rewrite(Req, Db, DDoc) of
-                undefined ->
-                    chttpd:send_error(Req, 404, <<"rewrite_error">>,
-                        <<"Invalid path.">>);
-                Rewrite ->
-                    do_rewrite(Req, Rewrite)
-            end;
-        undefined ->
-            chttpd:send_error(Req, 404, <<"rewrite_error">>,
-                <<"Invalid path.">>)
-    end.
-
-
-get_rules(#doc{body={Props}}) ->
-    couch_util:get_value(<<"rewrites">>, Props).
-
-
-do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props) ->
-    case couch_util:get_value(<<"code">>, Props) of
-        undefined ->
-            Method = rewrite_method(Req, Rewrite),
-            Headers = rewrite_headers(Req, Rewrite),
-            Path = ?b2l(rewrite_path(Req, Rewrite)),
-            NewMochiReq = mochiweb_request:new(MochiReq:get(socket),
-                                               Method,
-                                               Path,
-                                               MochiReq:get(version),
-                                               Headers),
-            Body = case couch_util:get_value(<<"body">>, Props) of
-                undefined -> erlang:get(mochiweb_request_body);
-                B -> B
-            end,
-            NewMochiReq:cleanup(),
-            case Body of
-                undefined -> [];
-                _ -> erlang:put(mochiweb_request_body, Body)
-            end,
-            couch_log:debug("rewrite to ~p", [Path]),
-            chttpd:handle_request_int(NewMochiReq);
-        Code ->
-            chttpd:send_response(
-                Req,
-                Code,
-                case couch_util:get_value(<<"headers">>, Props) of
-                    undefined -> [];
-                    {H1} -> H1
-                end,
-                rewrite_body(Rewrite))
-    end;
-do_rewrite(#httpd{method=Method,
-                  path_parts=[_DbName, <<"_design">>, _DesignName, _Rewrite|PathParts],
-                  mochi_req=MochiReq}=Req,
-           Rules) when is_list(Rules) ->
-    % create dispatch list from rules
-    Prefix = path_prefix(Req),
-    QueryList = lists:map(fun decode_query_value/1, chttpd:qs(Req)),
-
-    DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
-    Method1 = couch_util:to_binary(Method),
-
-    %% get raw path by matching url to a rule.
-    RawPath = case try_bind_path(DispatchList, Method1,
-            PathParts, QueryList) of
-        no_dispatch_path ->
-            throw(not_found);
-        {NewPathParts, Bindings} ->
-            Parts = [quote_plus(X) || X <- NewPathParts],
-
-            % build new path, reencode query args, eventually convert
-            % them to json
-            Bindings1 = maybe_encode_bindings(Bindings),
-            Path = iolist_to_binary([
-                string:join(Parts, [?SEPARATOR]),
-                [["?", mochiweb_util:urlencode(Bindings1)] || Bindings1 =/= []]
-            ]),
-
-            % if path is relative detect it and rewrite path
-            safe_relative_path(Prefix, Path)
-        end,
-
-    % normalize final path (fix levels "." and "..")
-    RawPath1 = ?b2l(normalize_path(RawPath)),
-
-    couch_log:debug("rewrite to ~p ~n", [RawPath1]),
-
-    % build a new mochiweb request
-    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                     MochiReq:get(method),
-                                     RawPath1,
-                                     MochiReq:get(version),
-                                     MochiReq:get(headers)),
-
-    % cleanup, It force mochiweb to reparse raw uri.
-    MochiReq1:cleanup(),
-
-    chttpd:handle_request_int(MochiReq1).
-
-
-rewrite_method(#httpd{method=Method}, {Props}) ->
-    DefaultMethod = couch_util:to_binary(Method),
-    couch_util:get_value(<<"method">>, Props, DefaultMethod).
-
-rewrite_path(#httpd{}=Req, {Props}=Rewrite) ->
-    Prefix = path_prefix(Req),
-    RewritePath = case couch_util:get_value(<<"path">>, Props) of
-        undefined ->
-            throw({<<"rewrite_error">>,
-                   <<"Rewrite result must produce a new path.">>});
-        P -> P
-    end,
-    SafeRelativePath = safe_relative_path(Prefix, RewritePath),
-    NormalizedPath = normalize_path(SafeRelativePath),
-    QueryParams = rewrite_query_params(Req, Rewrite),
-    case QueryParams of
-        <<"">> ->
-            NormalizedPath;
-        QueryParams ->
-            <<NormalizedPath/binary, "?", QueryParams/binary>>
-    end.
-
-rewrite_query_params(#httpd{}=Req, {Props}) ->
-    RequestQS = chttpd:qs(Req),
-    RewriteQS = case couch_util:get_value(<<"query">>, Props) of
-        undefined -> RequestQS;
-        {V} -> V
-    end,
-    RewriteQSEsc = [{chttpd:quote(K), chttpd:quote(V)} || {K, V} <- RewriteQS],
-    iolist_to_binary(string:join([[K, "=", V] || {K, V} <- RewriteQSEsc], "&")).
-
-rewrite_headers(#httpd{mochi_req=MochiReq}, {Props}) ->
-    case couch_util:get_value(<<"headers">>, Props) of
-        undefined ->
-            MochiReq:get(headers);
-        {H} ->
-            mochiweb_headers:enter_from_list(
-                lists:map(fun({Key, Val}) -> {?b2l(Key), ?b2l(Val)} end, H),
-                MochiReq:get(headers))
-    end.
-
-rewrite_body({Props}) ->
-    Body = case couch_util:get_value(<<"body">>, Props) of
-        undefined -> erlang:get(mochiweb_request_body);
-        B -> B
-    end,
-    case Body of
-        undefined ->
-            [];
-        _ ->
-            erlang:put(mochiweb_request_body, Body),
-            Body
-    end.
-
-
-path_prefix(#httpd{path_parts=[DbName, <<"_design">>, DesignName | _]}) ->
-    EscapedDesignName = ?l2b(couch_util:url_encode(DesignName)),
-    EscapedDbName = ?l2b(couch_util:url_encode(DbName)),
-    DesignId = <<"_design/", EscapedDesignName/binary>>,
-    <<"/", EscapedDbName/binary, "/", DesignId/binary>>.
-
-safe_relative_path(Prefix, Path) ->
-    case mochiweb_util:safe_relative_path(?b2l(Path)) of
-        undefined ->
-            <<Prefix/binary, "/", Path/binary>>;
-        V0 ->
-            V1 = ?l2b(V0),
-            <<Prefix/binary, "/", V1/binary>>
-    end.
-
-
-quote_plus({bind, X}) ->
-    mochiweb_util:quote_plus(X);
-quote_plus(X) ->
-    mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching current url. If none is found
-%% 404 error not_found is raised
-try_bind_path([], _Method, _PathParts, _QueryList) ->
-    no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
-    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
-    case bind_method(Method1, Method) of
-        true ->
-            case bind_path(PathParts1, PathParts, []) of
-                {ok, Remaining, Bindings} ->
-                    Bindings1 = Bindings ++ QueryList,
-                    % we parse query args from the rule and fill
-                    % it eventually with bindings vars
-                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
-                        Formats, []),
-                    % remove params in QueryLists1 that are already in
-                    % QueryArgs1
-                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
-                        K1 = to_binding(K),
-                        KV = case couch_util:get_value(K1, QueryArgs1) of
-                            undefined -> [{K1, V}];
-                            _V1 -> []
-                        end,
-                        Acc ++ KV
-                    end, [], Bindings1),
-
-                    FinalBindings = Bindings2 ++ QueryArgs1,
-                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
-                                    Remaining, []),
-                    {NewPathParts, FinalBindings};
-                fail ->
-                    try_bind_path(Rest, Method, PathParts, QueryList)
-            end;
-        false ->
-            try_bind_path(Rest, Method, PathParts, QueryList)
-    end.
-
-%% rewriting dynamically the quey list given as query member in
-%% rewrites. Each value is replaced by one binding or an argument
-%% passed in url.
-make_query_list([], _Bindings, _Formats, Acc) ->
-    Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
-    Value1 = {Value},
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
-    get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
-    get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
-    Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
-    lists:reverse(lists:foldl(fun
-                (<<":", Var/binary>>=Value1, Acc) ->
-                    [get_var(Var, Bindings, Value1, Formats)|Acc];
-                (Value1, Acc) ->
-                    [Value1|Acc]
-            end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
-    Value.
-
-maybe_json(Key, Value) ->
-    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
-                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
-        true ->
-            ?JSON_ENCODE(Value);
-        false ->
-            Value
-    end.
-
-get_var(VarName, Props, Default, Formats) ->
-    VarName1 = to_binding(VarName),
-    Val = couch_util:get_value(VarName1, Props, Default),
-    maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
-    case couch_util:get_value(VarName, Formats) of
-        undefined ->
-             Value;
-        Format ->
-            format(Format, Value)
-    end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
-    Value;
-format(<<"int">>, Value) when is_binary(Value) ->
-    format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
-    case (catch list_to_integer(Value)) of
-        IntVal when is_integer(IntVal) ->
-            IntVal;
-        _ ->
-            Value
-    end;
-format(<<"bool">>, Value) when is_binary(Value) ->
-    format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
-    case string:to_lower(Value) of
-        "true" -> true;
-        "false" -> false;
-        _ -> Value
-    end;
-format(_Format, Value) ->
-   Value.
-
-%% doc: build new patch from bindings. bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined -> << "undefined">>;
-        P1 ->
-            iolist_to_binary(P1)
-    end,
-    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
-    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc If method of the query fith the rule method. If the
-%% method rule is '*', which is the default, all
-%% request method will bind. It allows us to make rules
-%% depending on HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
-    true;
-bind_method({bind, Method}, Method) ->
-    true;
-bind_method(_, _) ->
-    false.
-
-
-%% @doc bind path. Using the rule from we try to bind variables given
-%% to the current url by pattern matching
-bind_path([], [], Bindings) ->
-    {ok, [], Bindings};
-bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
-    {ok, Rest, Bindings};
-bind_path(_, [], _) ->
-    fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
-    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
-    bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
-    fail.
-
-
-%% normalize path.
-normalize_path(Path) when is_binary(Path)->
-    normalize_path(?b2l(Path));
-normalize_path(Path) when is_list(Path)->
-    Segments = normalize_path1(string:tokens(Path, "/"), []),
-    NormalizedPath = string:join(Segments, [?SEPARATOR]),
-    iolist_to_binary(["/", NormalizedPath]).
-
-
-normalize_path1([], Acc) ->
-    lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
-    Acc1 = case Acc of
-        [] -> [".."|Acc];
-        [T|_] when T =:= ".." -> [".."|Acc];
-        [_|R] -> R
-    end,
-    normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
-    normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
-    normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc transform json rule in erlang for pattern matching
-make_rule(Rule) ->
-    Method = case couch_util:get_value(<<"method">>, Rule) of
-        undefined -> ?MATCH_ALL;
-        M -> to_binding(M)
-    end,
-    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
-        undefined -> [];
-        {Args} -> Args
-        end,
-    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
-        undefined -> [?MATCH_ALL];
-        From ->
-            parse_path(From)
-        end,
-    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
-        undefined ->
-            throw({error, invalid_rewrite_target});
-        To ->
-            parse_path(To)
-        end,
-    Formats = case couch_util:get_value(<<"formats">>, Rule) of
-        undefined -> [];
-        {Fmts} -> Fmts
-    end,
-    [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
-    {ok, SlashRE} = re:compile(<<"\\/">>),
-    path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc convert a path rule (from or to) to an erlang list
-%% * and path variable starting by ":" are converted
-%% in erlang atom.
-path_to_list([], Acc, _DotDotCount) ->
-    lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
-    path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
-    case config:get("httpd", "secure_rewrites", "true") of
-    "false" ->
-        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-    _Else ->
-        couch_log:notice("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
-        throw({insecure_rewrite_rule, "too many ../.. segments"})
-    end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
-    P1 = case P of
-        <<":", Var/binary>> ->
-            to_binding(Var);
-        _ -> P
-    end,
-    path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
-    [];
-maybe_encode_bindings(Props) ->
-    lists:foldl(fun
-            ({{bind, <<"*">>}, _V}, Acc) ->
-                Acc;
-            ({{bind, K}, V}, Acc) ->
-                V1 = iolist_to_binary(maybe_json(K, V)),
-                [{K, V1}|Acc]
-        end, [], Props).
-
-decode_query_value({K,V}) ->
-    case lists:member(K, ["key", "startkey", "start_key",
-                "endkey", "end_key", "keys"]) of
-        true ->
-            {to_binding(K), ?JSON_DECODE(V)};
-        false ->
-            {to_binding(K), ?l2b(V)}
-    end.
-
-to_binding({bind, V}) ->
-    {bind, V};
-to_binding(V) when is_list(V) ->
-    to_binding(?l2b(V));
-to_binding(V) ->
-    {bind, V}.
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 74674bb..e411b5e 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -14,15 +14,12 @@
     {description, "Apache CouchDB"},
     {vsn, git},
     {registered, [
-        couch_db_update,
-        couch_db_update_notifier_sup,
         couch_httpd,
         couch_primary_services,
         couch_proc_manager,
         couch_secondary_services,
         couch_server,
-        couch_sup,
-        couch_task_status
+        couch_sup
     ]},
     {mod, {couch_app, []}},
     {applications, [
@@ -42,8 +39,6 @@
         couch_epi,
         b64url,
         couch_log,
-        couch_event,
-        ioq,
         couch_stats,
         hyper,
         couch_prometheus
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
deleted file mode 100644
index 48e751a..0000000
--- a/src/couch/src/couch_bt_engine.erl
+++ /dev/null
@@ -1,1246 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine).
--behavior(couch_db_engine).
-
--export([
-    exists/1,
-
-    delete/3,
-    delete_compaction_files/3,
-
-    init/2,
-    terminate/2,
-    handle_db_updater_call/2,
-    handle_db_updater_info/2,
-
-    incref/1,
-    decref/1,
-    monitored_by/1,
-
-    last_activity/1,
-
-    get_compacted_seq/1,
-    get_del_doc_count/1,
-    get_disk_version/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_props/1,
-    get_size_info/1,
-    get_partition_info/2,
-    get_update_seq/1,
-    get_uuid/1,
-
-    set_revs_limit/2,
-    set_purge_infos_limit/2,
-    set_security/2,
-    set_props/2,
-
-    set_update_seq/2,
-
-    open_docs/2,
-    open_local_docs/2,
-    read_doc_body/2,
-    load_purge_infos/2,
-
-    serialize_doc/2,
-    write_doc_body/2,
-    write_doc_infos/3,
-    purge_docs/3,
-    copy_purge_infos/2,
-
-    commit_data/1,
-
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_changes/5,
-    fold_purge_infos/5,
-    count_changes_since/2,
-
-    start_compaction/4,
-    finish_compaction/4
-]).
-
-
--export([
-    init_state/4
-]).
-
-
--export([
-    id_tree_split/1,
-    id_tree_join/2,
-    id_tree_reduce/2,
-
-    seq_tree_split/1,
-    seq_tree_join/2,
-    seq_tree_reduce/2,
-
-    local_tree_split/1,
-    local_tree_join/2,
-
-    purge_tree_split/1,
-    purge_tree_join/2,
-    purge_tree_reduce/2,
-    purge_seq_tree_split/1,
-    purge_seq_tree_join/2
-]).
-
-
-% Used by the compactor
--export([
-    update_header/2,
-    copy_security/2,
-    copy_props/2
-]).
-
-
--include_lib("kernel/include/file.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
-exists(FilePath) ->
-    case is_file(FilePath) of
-        true ->
-            true;
-        false ->
-            is_file(FilePath ++ ".compact")
-    end.
-
-
-delete(RootDir, FilePath, Async) ->
-    %% Delete any leftover compaction files. If we don't do this a
-    %% subsequent request for this DB will try to open them to use
-    %% as a recovery.
-    delete_compaction_files(RootDir, FilePath, [{context, compaction}]),
-
-    % Delete the actual database file
-    couch_file:delete(RootDir, FilePath, Async).
-
-
-delete_compaction_files(RootDir, FilePath, DelOpts) ->
-    lists:foreach(fun(Ext) ->
-        couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
-    end, [".compact", ".compact.data", ".compact.meta"]).
-
-
-init(FilePath, Options) ->
-    {ok, Fd} = open_db_file(FilePath, Options),
-    Header = case lists:member(create, Options) of
-        true ->
-            delete_compaction_files(FilePath),
-            Header0 = couch_bt_engine_header:new(),
-            Header1 = init_set_props(Fd, Header0, Options),
-            ok = couch_file:write_header(Fd, Header1),
-            Header1;
-        false ->
-            case couch_file:read_header(Fd) of
-                {ok, Header0} ->
-                    Header0;
-                no_valid_header ->
-                    delete_compaction_files(FilePath),
-                    Header0 =  couch_bt_engine_header:new(),
-                    ok = couch_file:write_header(Fd, Header0),
-                    Header0
-            end
-    end,
-    {ok, init_state(FilePath, Fd, Header, Options)}.
-
-
-terminate(_Reason, St) ->
-    % If the reason we died is because our fd disappeared
-    % then we don't need to try closing it again.
-    Ref = St#st.fd_monitor,
-    if Ref == closed -> ok; true ->
-        ok = couch_file:close(St#st.fd),
-        receive
-            {'DOWN', Ref, _,  _, _} ->
-                ok
-            after 500 ->
-                ok
-        end
-    end,
-    couch_util:shutdown_sync(St#st.fd),
-    ok.
-
-
-handle_db_updater_call(Msg, St) ->
-    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor=Ref} = St) ->
-    {stop, normal, St#st{fd=undefined, fd_monitor=closed}}.
-
-
-incref(St) ->
-    {ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}.
-
-
-decref(St) ->
-    true = erlang:demonitor(St#st.fd_monitor, [flush]),
-    ok.
-
-
-monitored_by(St) ->
-    case erlang:process_info(St#st.fd, monitored_by) of
-        {monitored_by, Pids} ->
-            lists:filter(fun is_pid/1, Pids);
-        _ ->
-            []
-    end.
-
-
-last_activity(#st{fd = Fd}) ->
-    couch_file:last_read(Fd).
-
-
-get_compacted_seq(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, compacted_seq).
-
-
-get_del_doc_count(#st{} = St) ->
-    {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
-    element(2, Reds).
-
-
-get_disk_version(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, disk_version).
-
-
-get_doc_count(#st{} = St) ->
-    {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
-    element(1, Reds).
-
-
-get_epochs(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, epochs).
-
-
-get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
-    Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
-        {stop, PurgeSeq}
-    end,
-    {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, [{dir, rev}]),
-    PurgeSeq.
-
-
-get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
-    Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
-        {stop, PurgeSeq}
-    end,
-    {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, []),
-    PurgeSeq.
-
-
-get_purge_infos_limit(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, purge_infos_limit).
-
-
-get_revs_limit(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, revs_limit).
-
-
-get_size_info(#st{} = St) ->
-    {ok, FileSize} = couch_file:bytes(St#st.fd),
-    {ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree),
-    SizeInfo0 = element(3, DbReduction),
-    SizeInfo = case SizeInfo0 of
-        SI when is_record(SI, size_info) ->
-            SI;
-        {AS, ES} ->
-            #size_info{active=AS, external=ES};
-        AS ->
-            #size_info{active=AS}
-    end,
-    ActiveSize = active_size(St, SizeInfo),
-    ExternalSize = SizeInfo#size_info.external,
-    [
-        {active, ActiveSize},
-        {external, ExternalSize},
-        {file, FileSize}
-    ].
-
-
-partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, SizesAcc}) ->
-    case couch_partition:is_member(Key, Partition) of
-        true ->
-            {skip, {Partition, DC + DCAcc, DDC + DDCAcc, reduce_sizes(Sizes, SizesAcc)}};
-        false ->
-            {ok, {Partition, DCAcc, DDCAcc, SizesAcc}}
-    end;
-
-partition_size_cb(visit, FDI, _PrevReds, {Partition, DCAcc, DDCAcc, Acc}) ->
-    InPartition = couch_partition:is_member(FDI#full_doc_info.id, Partition),
-    Deleted = FDI#full_doc_info.deleted,
-    case {InPartition, Deleted} of
-        {true, true} ->
-            {ok, {Partition, DCAcc, DDCAcc + 1,
-                reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
-        {true, false} ->
-            {ok, {Partition, DCAcc + 1, DDCAcc,
-                reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
-        {false, _} ->
-            {ok, {Partition, DCAcc, DDCAcc, Acc}}
-    end.
-
-
-get_partition_info(#st{} = St, Partition) ->
-    StartKey = couch_partition:start_key(Partition),
-    EndKey = couch_partition:end_key(Partition),
-    Fun = fun partition_size_cb/4,
-    InitAcc = {Partition, 0, 0, #size_info{}},
-    Options = [{start_key, StartKey}, {end_key, EndKey}],
-    {ok, _, OutAcc} = couch_btree:fold(St#st.id_tree, Fun, InitAcc, Options),
-    {Partition, DocCount, DocDelCount, SizeInfo} = OutAcc,
-    [
-        {partition, Partition},
-        {doc_count, DocCount},
-        {doc_del_count, DocDelCount},
-        {sizes, [
-            {active, SizeInfo#size_info.active},
-            {external, SizeInfo#size_info.external}
-        ]}
-    ].
-
-
-get_security(#st{header = Header} = St) ->
-    case couch_bt_engine_header:get(Header, security_ptr) of
-        undefined ->
-            [];
-        Pointer ->
-            {ok, SecProps} = couch_file:pread_term(St#st.fd, Pointer),
-            SecProps
-    end.
-
-
-get_props(#st{header = Header} = St) ->
-    case couch_bt_engine_header:get(Header, props_ptr) of
-        undefined ->
-            [];
-        Pointer ->
-            {ok, Props} = couch_file:pread_term(St#st.fd, Pointer),
-            Props
-    end.
-
-
-get_update_seq(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, update_seq).
-
-
-get_uuid(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, uuid).
-
-
-set_revs_limit(#st{header = Header} = St, RevsLimit) ->
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {revs_limit, RevsLimit}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) ->
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {purge_infos_limit, PurgeInfosLimit}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_security(#st{header = Header} = St, NewSecurity) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options),
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {security_ptr, Ptr}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_props(#st{header = Header} = St, Props) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {props_ptr, Ptr}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-open_docs(#st{} = St, DocIds) ->
-    Results = couch_btree:lookup(St#st.id_tree, DocIds),
-    lists:map(fun
-        ({ok, FDI}) -> FDI;
-        (not_found) -> not_found
-    end, Results).
-
-
-open_local_docs(#st{} = St, DocIds) ->
-    Results = couch_btree:lookup(St#st.local_tree, DocIds),
-    lists:map(fun
-        ({ok, Doc}) -> Doc;
-        (not_found) -> not_found
-    end, Results).
-
-
-read_doc_body(#st{} = St, #doc{} = Doc) ->
-    {ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body),
-    Doc#doc{
-        body = Body,
-        atts = Atts
-    }.
-
-
-load_purge_infos(St, UUIDs) ->
-    Results = couch_btree:lookup(St#st.purge_tree, UUIDs),
-    lists:map(fun
-        ({ok, Info}) -> Info;
-        (not_found) -> not_found
-    end, Results).
-
-
-serialize_doc(#st{} = St, #doc{} = Doc) ->
-    Compress = fun(Term) ->
-        case couch_compress:is_compressed(Term, St#st.compression) of
-            true -> Term;
-            false -> couch_compress:compress(Term, St#st.compression)
-        end
-    end,
-    Body = Compress(Doc#doc.body),
-    Atts = Compress(Doc#doc.atts),
-    SummaryBin = ?term_to_bin({Body, Atts}),
-    Md5 = couch_hash:md5_hash(SummaryBin),
-    Data = couch_file:assemble_file_chunk(SummaryBin, Md5),
-    % TODO: This is a terrible hack to get around the issues
-    %       in COUCHDB-3255. We'll need to come back and figure
-    %       out a better approach to handling the case when we
-    %       need to generate a new revision id after the doc
-    %       has been serialized.
-    Doc#doc{
-        body = Data,
-        meta = [{comp_body, Body} | Doc#doc.meta]
-    }.
-
-
-write_doc_body(St, #doc{} = Doc) ->
-    #st{
-        fd = Fd
-    } = St,
-    {ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body),
-    {ok, Doc#doc{body = Ptr}, Written}.
-
-
-write_doc_infos(#st{} = St, Pairs, LocalDocs) ->
-    #st{
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        local_tree = LocalTree
-    } = St,
-    FinalAcc = lists:foldl(fun({OldFDI, NewFDI}, Acc) ->
-        {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
-        case {OldFDI, NewFDI} of
-            {not_found, #full_doc_info{}} ->
-                {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
-            {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
-                NewAddAcc = [NewFDI | AddAcc],
-                NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
-                {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
-            {#full_doc_info{id = Id}, not_found} ->
-                NewRemIdsAcc = [Id | RemIdsAcc],
-                NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
-                {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
-        end
-    end, {[], [], []}, Pairs),
-
-    {Add, RemIds, RemSeqs} = FinalAcc,
-    {ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds),
-    {ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs),
-
-    {AddLDocs, RemLDocIds} = lists:foldl(fun(Doc, {AddAcc, RemAcc}) ->
-        case Doc#doc.deleted of
-            true ->
-                {AddAcc, [Doc#doc.id | RemAcc]};
-            false ->
-                {[Doc | AddAcc], RemAcc}
-        end
-    end, {[], []}, LocalDocs),
-    {ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds),
-
-    NewUpdateSeq = lists:foldl(fun(#full_doc_info{update_seq=Seq}, Acc) ->
-        erlang:max(Seq, Acc)
-    end, get_update_seq(St), Add),
-
-    NewHeader = couch_bt_engine_header:set(St#st.header, [
-        {update_seq, NewUpdateSeq}
-    ]),
-
-    {ok, St#st{
-        header = NewHeader,
-        id_tree = IdTree2,
-        seq_tree = SeqTree2,
-        local_tree = LocalTree2,
-        needs_commit = true
-    }}.
-
-
-purge_docs(#st{} = St, Pairs, PurgeInfos) ->
-    #st{
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    } = St,
-
-    RemDocIds = [Old#full_doc_info.id || {Old, not_found} <- Pairs],
-    RemSeqs = [Old#full_doc_info.update_seq || {Old, _} <- Pairs],
-    DocsToAdd = [New || {_, New} <- Pairs, New /= not_found],
-    CurrSeq = couch_bt_engine_header:get(St#st.header, update_seq),
-    Seqs = [FDI#full_doc_info.update_seq || FDI <- DocsToAdd],
-    NewSeq = lists:max([CurrSeq | Seqs]),
-
-    % We bump NewUpdateSeq because we have to ensure that
-    % indexers see that they need to process the new purge
-    % information.
-    UpdateSeq = case NewSeq == CurrSeq of
-        true -> CurrSeq + 1;
-        false -> NewSeq
-    end,
-    Header = couch_bt_engine_header:set(St#st.header, [
-        {update_seq, UpdateSeq}
-    ]),
-
-    {ok, IdTree2} = couch_btree:add_remove(IdTree, DocsToAdd, RemDocIds),
-    {ok, SeqTree2} = couch_btree:add_remove(SeqTree, DocsToAdd, RemSeqs),
-    {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
-    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
-    {ok, St#st{
-        header = Header,
-        id_tree = IdTree2,
-        seq_tree = SeqTree2,
-        purge_tree = PurgeTree2,
-        purge_seq_tree = PurgeSeqTree2,
-        needs_commit = true
-    }}.
-
-
-copy_purge_infos(#st{} = St, PurgeInfos) ->
-    #st{
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    } = St,
-    {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
-    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
-    {ok, St#st{
-       purge_tree = PurgeTree2,
-       purge_seq_tree = PurgeSeqTree2,
-       needs_commit = true
-    }}.
-
-
-commit_data(St) ->
-    #st{
-        fd = Fd,
-        header = OldHeader,
-        needs_commit = NeedsCommit
-    } = St,
-
-    NewHeader = update_header(St, OldHeader),
-
-    case NewHeader /= OldHeader orelse NeedsCommit of
-        true ->
-            couch_file:sync(Fd),
-            ok = couch_file:write_header(Fd, NewHeader),
-            couch_file:sync(Fd),
-            {ok, St#st{
-                header = NewHeader,
-                needs_commit = false
-            }};
-        false ->
-            {ok, St}
-    end.
-
-
-open_write_stream(#st{} = St, Options) ->
-    couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options).
-
-
-open_read_stream(#st{} = St, StreamSt) ->
-    {ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}.
-
-
-is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) ->
-    St#st.fd == Fd;
-is_active_stream(_, _) ->
-    false.
-
-
-fold_docs(St, UserFun, UserAcc, Options) ->
-    fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options).
-
-
-fold_local_docs(St, UserFun, UserAcc, Options) ->
-    case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of
-        {ok, _Reds, FinalAcc} -> {ok, null, FinalAcc};
-        {ok, FinalAcc} -> {ok, FinalAcc}
-    end.
-
-
-fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
-    Fun = fun drop_reductions/4,
-    InAcc = {UserFun, UserAcc},
-    Opts = [{start_key, SinceSeq + 1}] ++ Options,
-    {ok, _, OutAcc} = couch_btree:fold(St#st.seq_tree, Fun, InAcc, Opts),
-    {_, FinalUserAcc} = OutAcc,
-    {ok, FinalUserAcc}.
-
-
-fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) ->
-    PurgeSeqTree = St#st.purge_seq_tree,
-    StartSeq = StartSeq0 + 1,
-    MinSeq = get_oldest_purge_seq(St),
-    if MinSeq =< StartSeq -> ok; true ->
-        erlang:error({invalid_start_purge_seq, StartSeq0})
-    end,
-    Wrapper = fun(Info, _Reds, UAcc) ->
-        UserFun(Info, UAcc)
-    end,
-    Opts = [{start_key, StartSeq}] ++ Options,
-    {ok, _, OutAcc} = couch_btree:fold(PurgeSeqTree, Wrapper, UserAcc, Opts),
-    {ok, OutAcc}.
-
-
-count_changes_since(St, SinceSeq) ->
-    BTree = St#st.seq_tree,
-    FoldFun = fun(_SeqStart, PartialReds, 0) ->
-        {ok, couch_btree:final_reduce(BTree, PartialReds)}
-    end,
-    Opts = [{start_key, SinceSeq + 1}],
-    {ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts),
-    Changes.
-
-
-start_compaction(St, DbName, Options, Parent) ->
-    Args = [St, DbName, Options, Parent],
-    Pid = spawn_link(couch_bt_engine_compactor, start, Args),
-    {ok, St, Pid}.
-
-
-finish_compaction(OldState, DbName, Options, CompactFilePath) ->
-    {ok, NewState1} = ?MODULE:init(CompactFilePath, Options),
-    OldSeq = get_update_seq(OldState),
-    NewSeq = get_update_seq(NewState1),
-    case OldSeq == NewSeq of
-        true ->
-            finish_compaction_int(OldState, NewState1);
-        false ->
-            couch_log:info("Compaction file still behind main file "
-                           "(update seq=~p. compact update seq=~p). Retrying.",
-                           [OldSeq, NewSeq]),
-            ok = decref(NewState1),
-            start_compaction(OldState, DbName, Options, self())
-    end.
-
-
-id_tree_split(#full_doc_info{}=Info) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = Deleted,
-        sizes = SizeInfo,
-        rev_tree = Tree
-    } = Info,
-    {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-id_tree_join(Id, {HighSeq, Deleted, DiskTree}) ->
-    % Handle old formats before data_size was added
-    id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
-
-id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = HighSeq,
-        deleted = ?i2b(Deleted),
-        sizes = couch_db_updater:upgrade_sizes(Sizes),
-        rev_tree = rev_tree(DiskTree)
-    }.
-
-
-id_tree_reduce(reduce, FullDocInfos) ->
-    lists:foldl(fun(Info, {NotDeleted, Deleted, Sizes}) ->
-        Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
-        case Info#full_doc_info.deleted of
-        true ->
-            {NotDeleted, Deleted + 1, Sizes2};
-        false ->
-            {NotDeleted + 1, Deleted, Sizes2}
-        end
-    end, {0, 0, #size_info{}}, FullDocInfos);
-id_tree_reduce(rereduce, Reds) ->
-    lists:foldl(fun
-        ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
-            % pre 1.2 format, will be upgraded on compaction
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
-        ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
-            AccSizes2 = reduce_sizes(AccSizes, Sizes),
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
-    end, {0, 0, #size_info{}}, Reds).
-
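Most of the per-database metadata in this engine (get_doc_count/1, get_del_doc_count/1 and the size portion of get_size_info/1) is read from a single cached reduction over the id tree rather than a scan; id_tree_reduce/2 above is the function that maintains the {NotDeleted, Deleted, Sizes} triple. A minimal standalone model of that accounting, with plain tuples in place of couch_btree and #full_doc_info{} and entirely made-up names, might look like:

    -module(id_tree_reduce_sketch).
    -export([demo/0]).

    %% Doc = {Id, Deleted, ActiveSize, ExternalSize}
    reduce(Docs) ->
        lists:foldl(fun({_Id, Deleted, Active, External}, {ND, D, {A, E}}) ->
            Sizes = {A + Active, E + External},
            case Deleted of
                true  -> {ND, D + 1, Sizes};
                false -> {ND + 1, D, Sizes}
            end
        end, {0, 0, {0, 0}}, Docs).

    %% Combine the reductions of several chunks without revisiting the docs.
    rereduce(Reds) ->
        lists:foldl(fun({ND, D, {A, E}}, {NDAcc, DAcc, {AAcc, EAcc}}) ->
            {NDAcc + ND, DAcc + D, {AAcc + A, EAcc + E}}
        end, {0, 0, {0, 0}}, Reds).

    demo() ->
        Chunk1 = [{<<"a">>, false, 120, 90}, {<<"b">>, true, 40, 10}],
        Chunk2 = [{<<"c">>, false, 300, 210}],
        rereduce([reduce(Chunk1), reduce(Chunk2)]).
        %% => {2, 1, {460, 310}}: doc count, deleted doc count, {active, external}

The real engine stores such triples on the btree's interior nodes, which is why get_doc_count/1 is a single full_reduce call rather than a fold over every document.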
-
-seq_tree_split(#full_doc_info{}=Info) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = Del,
-        sizes = SizeInfo,
-        rev_tree = Tree
-    } = Info,
-    {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
-    seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree});
-
-seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = ?i2b(Del),
-        sizes = join_sizes(Sizes),
-        rev_tree = rev_tree(DiskTree)
-    };
-
-seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
-    % Older versions stored #doc_info records in the seq_tree.
-    % Compact to upgrade.
-    Revs = lists:map(fun({Rev, Seq, Bp}) ->
-        #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
-    end, RevInfos),
-    DeletedRevs = lists:map(fun({Rev, Seq, Bp}) ->
-        #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
-    end, DeletedRevInfos),
-    #doc_info{
-        id = Id,
-        high_seq = KeySeq,
-        revs = Revs ++ DeletedRevs
-    }.
-
-
-seq_tree_reduce(reduce, DocInfos) ->
-    % count the number of documents
-    length(DocInfos);
-seq_tree_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) ->
-    #doc{
-        id = Id,
-        body = BodyData
-    } = Doc,
-    {Id, {binary_to_integer(Rev), BodyData}};
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
-    #doc{
-        id = Id,
-        body = BodyData
-    } = Doc,
-    {Id, {Rev, BodyData}}.
-
-
-local_tree_join(Id, {Rev, BodyData}) when is_binary(Rev) ->
-    #doc{
-        id = Id,
-        revs = {0, [Rev]},
-        body = BodyData
-    };
-
-local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
-    #doc{
-        id = Id,
-        revs = {0, [integer_to_binary(Rev)]},
-        body = BodyData
-    }.
-
-
-purge_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
-    {UUID, {PurgeSeq, DocId, Revs}}.
-
-
-purge_tree_join(UUID, {PurgeSeq, DocId, Revs}) ->
-    {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_seq_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
-    {PurgeSeq, {UUID, DocId, Revs}}.
-
-
-purge_seq_tree_join(PurgeSeq, {UUID, DocId, Revs}) ->
-    {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_tree_reduce(reduce, IdRevs) ->
-    % count the number of purge requests
-    length(IdRevs);
-purge_tree_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-
-set_update_seq(#st{header = Header} = St, UpdateSeq) ->
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {update_seq, UpdateSeq}
-        ]),
-        needs_commit = true
-    }}.
-
-
-copy_security(#st{header = Header} = St, SecProps) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options),
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {security_ptr, Ptr}
-        ]),
-        needs_commit = true
-    }}.
-
-
-copy_props(#st{header = Header} = St, Props) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {props_ptr, Ptr}
-        ]),
-        needs_commit = true
-    }}.
-
-
-open_db_file(FilePath, Options) ->
-    case couch_file:open(FilePath, Options) of
-        {ok, Fd} ->
-            {ok, Fd};
-        {error, enoent} ->
-            % Couldn't find file. Is there a compact version? This can
-            % happen (rarely) if we crashed during the file switch.
-            case couch_file:open(FilePath ++ ".compact", [nologifmissing]) of
-                {ok, Fd} ->
-                    Fmt = "Recovering from compaction file: ~s~s",
-                    couch_log:info(Fmt, [FilePath, ".compact"]),
-                    ok = file:rename(FilePath ++ ".compact", FilePath),
-                    ok = couch_file:sync(Fd),
-                    {ok, Fd};
-                {error, enoent} ->
-                    throw({not_found, no_db_file})
-            end;
-        Error ->
-            throw(Error)
-    end.
-
-
-init_state(FilePath, Fd, Header0, Options) ->
-    ok = couch_file:sync(Fd),
-
-    Compression = couch_compress:get_compression_method(),
-
-    Header1 = couch_bt_engine_header:upgrade(Header0),
-    Header2 = set_default_security_object(Fd, Header1, Compression, Options),
-    Header = upgrade_purge_info(Fd, Header2),
-
-    IdTreeState = couch_bt_engine_header:id_tree_state(Header),
-    {ok, IdTree} = couch_btree:open(IdTreeState, Fd, [
-            {split, fun ?MODULE:id_tree_split/1},
-            {join, fun ?MODULE:id_tree_join/2},
-            {reduce, fun ?MODULE:id_tree_reduce/2},
-            {compression, Compression}
-        ]),
-
-    SeqTreeState = couch_bt_engine_header:seq_tree_state(Header),
-    {ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [
-            {split, fun ?MODULE:seq_tree_split/1},
-            {join, fun ?MODULE:seq_tree_join/2},
-            {reduce, fun ?MODULE:seq_tree_reduce/2},
-            {compression, Compression}
-        ]),
-
-    LocalTreeState = couch_bt_engine_header:local_tree_state(Header),
-    {ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [
-            {split, fun ?MODULE:local_tree_split/1},
-            {join, fun ?MODULE:local_tree_join/2},
-            {compression, Compression}
-        ]),
-
-    PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header),
-    {ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [
-        {split, fun ?MODULE:purge_tree_split/1},
-        {join, fun ?MODULE:purge_tree_join/2},
-        {reduce, fun ?MODULE:purge_tree_reduce/2}
-    ]),
-
-    PurgeSeqTreeState = couch_bt_engine_header:purge_seq_tree_state(Header),
-    {ok, PurgeSeqTree} = couch_btree:open(PurgeSeqTreeState, Fd, [
-        {split, fun ?MODULE:purge_seq_tree_split/1},
-        {join, fun ?MODULE:purge_seq_tree_join/2},
-        {reduce, fun ?MODULE:purge_tree_reduce/2}
-    ]),
-
-    ok = couch_file:set_db_pid(Fd, self()),
-
-    St = #st{
-        filepath = FilePath,
-        fd = Fd,
-        fd_monitor = erlang:monitor(process, Fd),
-        header = Header,
-        needs_commit = false,
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        local_tree = LocalTree,
-        compression = Compression,
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    },
-
-    % If this is a new database we've just created a
-    % new UUID and default security object which need
-    % to be written to disk.
-    case Header /= Header0 of
-        true ->
-            {ok, NewSt} = commit_data(St#st{needs_commit = true}),
-            NewSt;
-        false ->
-            St
-    end.
-
-
-update_header(St, Header) ->
-    couch_bt_engine_header:set(Header, [
-        {seq_tree_state, couch_btree:get_state(St#st.seq_tree)},
-        {id_tree_state, couch_btree:get_state(St#st.id_tree)},
-        {local_tree_state, couch_btree:get_state(St#st.local_tree)},
-        {purge_tree_state, couch_btree:get_state(St#st.purge_tree)},
-        {purge_seq_tree_state, couch_btree:get_state(St#st.purge_seq_tree)}
-    ]).
-
-
-increment_update_seq(#st{header = Header} = St) ->
-    UpdateSeq = couch_bt_engine_header:get(Header, update_seq),
-    St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {update_seq, UpdateSeq + 1}
-        ])
-    }.
-
-
-set_default_security_object(Fd, Header, Compression, Options) ->
-    case couch_bt_engine_header:get(Header, security_ptr) of
-        Pointer when is_integer(Pointer) ->
-            Header;
-        _ ->
-            Default = couch_util:get_value(default_security_object, Options),
-            AppendOpts = [{compression, Compression}],
-            {ok, Ptr, _} = couch_file:append_term(Fd, Default, AppendOpts),
-            couch_bt_engine_header:set(Header, security_ptr, Ptr)
-    end.
-
-
-% This function is here, and not in couch_bt_engine_header
-% because it requires modifying file contents
-upgrade_purge_info(Fd, Header) ->
-    case couch_bt_engine_header:get(Header, purge_tree_state) of
-        nil ->
-            Header;
-        Ptr when is_tuple(Ptr) ->
-            Header;
-        PurgeSeq when is_integer(PurgeSeq) ->
-            % Pointer to old purged ids/revs is in purge_seq_tree_state
-            Ptr = couch_bt_engine_header:get(Header, purge_seq_tree_state),
-
-            case Ptr of
-                nil ->
-                    PTS = couch_bt_engine_header:purge_tree_state(Header),
-                    PurgeTreeSt = case PTS of 0 -> nil; Else -> Else end,
-                    couch_bt_engine_header:set(Header, [
-                        {purge_tree_state, PurgeTreeSt}
-                    ]);
-                _ ->
-                    {ok, PurgedIdsRevs} = couch_file:pread_term(Fd, Ptr),
-
-                    {Infos, _} = lists:foldl(fun({Id, Revs}, {InfoAcc, PSeq}) ->
-                        Info = {PSeq, couch_uuids:random(), Id, Revs},
-                        {[Info | InfoAcc], PSeq + 1}
-                    end, {[], PurgeSeq}, PurgedIdsRevs),
-
-                    {ok, PurgeTree} = couch_btree:open(nil, Fd, [
-                        {split, fun ?MODULE:purge_tree_split/1},
-                        {join, fun ?MODULE:purge_tree_join/2},
-                        {reduce, fun ?MODULE:purge_tree_reduce/2}
-                    ]),
-                    {ok, PurgeTree2} = couch_btree:add(PurgeTree, Infos),
-                    PurgeTreeSt = couch_btree:get_state(PurgeTree2),
-
-                    {ok, PurgeSeqTree} = couch_btree:open(nil, Fd, [
-                        {split, fun ?MODULE:purge_seq_tree_split/1},
-                        {join, fun ?MODULE:purge_seq_tree_join/2},
-                        {reduce, fun ?MODULE:purge_tree_reduce/2}
-                    ]),
-                    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, Infos),
-                    PurgeSeqTreeSt = couch_btree:get_state(PurgeSeqTree2),
-
-                    couch_bt_engine_header:set(Header, [
-                        {purge_tree_state, PurgeTreeSt},
-                        {purge_seq_tree_state, PurgeSeqTreeSt}
-                    ])
-            end
-    end.
-
-
-init_set_props(Fd, Header, Options) ->
-    case couch_util:get_value(props, Options) of
-        undefined ->
-            Header;
-        InitialProps ->
-            Compression = couch_compress:get_compression_method(),
-            AppendOpts = [{compression, Compression}],
-            {ok, Ptr, _} = couch_file:append_term(Fd, InitialProps, AppendOpts),
-            couch_bt_engine_header:set(Header, props_ptr, Ptr)
-    end.
-
-
-delete_compaction_files(FilePath) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    DelOpts = [{context, compaction}],
-    delete_compaction_files(RootDir, FilePath, DelOpts).
-
-
-rev_tree(DiskTree) ->
-    couch_key_tree:map(fun
-        (_RevId, {Del, Ptr, Seq}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq
-            };
-        (_RevId, {Del, Ptr, Seq, Size}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq,
-                sizes = couch_db_updater:upgrade_sizes(Size)
-            };
-        (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq,
-                sizes = couch_db_updater:upgrade_sizes(Sizes),
-                atts = Atts
-            };
-        (_RevId, ?REV_MISSING) ->
-            ?REV_MISSING
-    end, DiskTree).
-
-
-disk_tree(RevTree) ->
-    couch_key_tree:map(fun
-        (_RevId, ?REV_MISSING) ->
-            ?REV_MISSING;
-        (_RevId, #leaf{} = Leaf) ->
-            #leaf{
-                deleted = Del,
-                ptr = Ptr,
-                seq = Seq,
-                sizes = Sizes,
-                atts = Atts
-            } = Leaf,
-            {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
-    end, RevTree).
-
-
-split_sizes(#size_info{}=SI) ->
-    {SI#size_info.active, SI#size_info.external}.
-
-
-join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
-    #size_info{active=Active, external=External}.
-
-
-reduce_sizes(nil, _) ->
-    nil;
-reduce_sizes(_, nil) ->
-    nil;
-reduce_sizes(#size_info{}=S1, #size_info{}=S2) ->
-    #size_info{
-        active = S1#size_info.active + S2#size_info.active,
-        external = S1#size_info.external + S2#size_info.external
-    };
-reduce_sizes(S1, S2) ->
-    US1 = couch_db_updater:upgrade_sizes(S1),
-    US2 = couch_db_updater:upgrade_sizes(S2),
-    reduce_sizes(US1, US2).
-
-
-active_size(#st{} = St, #size_info{} = SI) ->
-    Trees = [
-        St#st.id_tree,
-        St#st.seq_tree,
-        St#st.local_tree,
-        St#st.purge_tree,
-        St#st.purge_seq_tree
-    ],
-    lists:foldl(fun(T, Acc) ->
-        case couch_btree:size(T) of
-            _ when Acc == null ->
-                null;
-            nil ->
-                null;
-            Size ->
-                Acc + Size
-        end
-    end, SI#size_info.active, Trees).
-
-
-fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
-    Fun = case lists:member(include_deleted, Options) of
-        true -> fun include_deleted/4;
-        false -> fun skip_deleted/4
-    end,
-    RedFun = case lists:member(include_reductions, Options) of
-        true -> fun include_reductions/4;
-        false -> fun drop_reductions/4
-    end,
-    InAcc = {RedFun, {UserFun, UserAcc}},
-    {ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options),
-    {_, {_, FinalUserAcc}} = OutAcc,
-    case lists:member(include_reductions, Options) of
-        true when Tree == St#st.id_tree ->
-            {ok, fold_docs_reduce_to_count(Reds), FinalUserAcc};
-        true when Tree == St#st.local_tree ->
-            {ok, 0, FinalUserAcc};
-        false ->
-            {ok, FinalUserAcc}
-    end.
-
-
-include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}}.
-
-
-% First element of the reductions is the total
-% number of undeleted documents.
-skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) ->
-    {skip, Acc};
-skip_deleted(visit, #full_doc_info{deleted = true}, _, Acc) ->
-    {ok, Acc};
-skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}}.
-
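As the comment above notes, the first element of the id tree reduction is the count of live (undeleted) documents; skip_deleted/4 uses it so a fold can prune an entire subtree whose reduction reports zero live documents. A toy, self-contained illustration of that pruning idea, with plain tuples and a hand-rolled fold rather than the real couch_btree traverse/visit protocol (every name here is invented):

    -module(skip_deleted_sketch).
    -export([demo/0]).

    %% Node = {node, {LiveCount, DeletedCount}, Children} | {leaf, DocId, Deleted}
    fold({node, {0, _Deleted}, _Children}, Acc) ->
        Acc;                                  % no live docs below, prune subtree
    fold({node, _Reds, Children}, Acc) ->
        lists:foldl(fun fold/2, Acc, Children);
    fold({leaf, _Id, true}, Acc) ->
        Acc;                                  % deleted doc, skip the visit
    fold({leaf, Id, false}, Acc) ->
        [Id | Acc].

    demo() ->
        Tree = {node, {2, 2}, [
            {node, {0, 2}, [{leaf, <<"x">>, true}, {leaf, <<"y">>, true}]},
            {node, {2, 0}, [{leaf, <<"a">>, false}, {leaf, <<"b">>, false}]}
        ]},
        lists:reverse(fold(Tree, [])).        %% => [<<"a">>, <<"b">>]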
-
-include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}};
-include_reductions(_, _, _, Acc) ->
-    {ok, Acc}.
-
-
-drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(FDI, UserAcc),
-    {Go, {UserFun, NewUserAcc}};
-drop_reductions(_, _, _, Acc) ->
-    {ok, Acc}.
-
-
-fold_docs_reduce_to_count(Reds) ->
-    RedFun = fun id_tree_reduce/2,
-    FinalRed = couch_btree:final_reduce(RedFun, Reds),
-    element(1, FinalRed).
-
-
-finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
-    #st{
-        filepath = FilePath,
-        local_tree = OldLocal
-    } = OldSt,
-    #st{
-        filepath = CompactDataPath,
-        header = Header,
-        local_tree = NewLocal1
-    } = NewSt1,
-
-    % suck up all the local docs into memory and write them to the new db
-    LoadFun = fun(Value, _Offset, Acc) ->
-        {ok, [Value | Acc]}
-    end,
-    {ok, _, LocalDocs} = couch_btree:foldl(OldLocal, LoadFun, []),
-    {ok, NewLocal2} = couch_btree:add(NewLocal1, LocalDocs),
-
-    {ok, NewSt2} = commit_data(NewSt1#st{
-        header = couch_bt_engine_header:set(Header, [
-            {compacted_seq, get_update_seq(OldSt)},
-            {revs_limit, get_revs_limit(OldSt)},
-            {purge_infos_limit, get_purge_infos_limit(OldSt)}
-        ]),
-        local_tree = NewLocal2
-    }),
-
-    % Rename our *.compact.data file to *.compact so that if we
-    % die between deleting the old file and renaming *.compact
-    % we can recover correctly.
-    ok = file:rename(CompactDataPath, FilePath ++ ".compact"),
-
-    % Remove the uncompacted database file
-    RootDir = config:get("couchdb", "database_dir", "."),
-    couch_file:delete(RootDir, FilePath),
-
-    % Move our compacted file into its final location
-    ok = file:rename(FilePath ++ ".compact", FilePath),
-
-    % Delete the old meta compaction file after promoting
-    % the compaction file.
-    couch_file:delete(RootDir, FilePath ++ ".compact.meta"),
-
-    % We're finished with our old state
-    decref(OldSt),
-
-    % And return our finished new state
-    {ok, NewSt2#st{
-        filepath = FilePath
-    }, undefined}.
-
-
-is_file(Path) ->
-    case file:read_file_info(Path, [raw]) of
-        {ok, #file_info{type = regular}} -> true;
-        {ok, #file_info{type = directory}} -> true;
-        _ -> false
-    end.
diff --git a/src/couch/src/couch_bt_engine.hrl b/src/couch/src/couch_bt_engine.hrl
deleted file mode 100644
index e3c1d49..0000000
--- a/src/couch/src/couch_bt_engine.hrl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(st, {
-    filepath,
-    fd,
-    fd_monitor,
-    % deprecated but keeping it here to avoid altering the record size
-    fsync_options_deprecated,
-    header,
-    needs_commit,
-    id_tree,
-    seq_tree,
-    local_tree,
-    compression,
-    purge_tree,
-    purge_seq_tree
-}).
diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl
deleted file mode 100644
index 0b3fb22..0000000
--- a/src/couch/src/couch_bt_engine_compactor.erl
+++ /dev/null
@@ -1,590 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor).
-
-
--export([
-    start/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
--record(comp_header, {
-    db_header,
-    meta_state
-}).
-
--record(merge_st, {
-    id_tree,
-    seq_tree,
-    curr,
-    rem_seqs,
-    infos
-}).
-
-
-start(#st{} = St, DbName, Options, Parent) ->
-    erlang:put(io_priority, {db_compact, DbName}),
-    #st{
-        filepath = FilePath,
-        header = Header
-    } = St,
-    couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]),
-
-    couch_db_engine:trigger_on_compact(DbName),
-
-    {ok, NewSt, DName, DFd, MFd, Retry} =
-            open_compaction_files(Header, FilePath, Options),
-    erlang:monitor(process, MFd),
-
-    % This is a bit worrisome. init_db/4 will monitor the data fd
-    % but it doesn't know about the meta fd. For now I'll maintain
-    % that the data fd is the old normal fd and meta fd is special
-    % and hope everything works out for the best.
-    unlink(DFd),
-
-    NewSt1 = copy_purge_info(DbName, St, NewSt, Retry),
-    NewSt2 = copy_compact(DbName, St, NewSt1, Retry),
-    NewSt3 = sort_meta_data(NewSt2),
-    NewSt4 = commit_compaction_data(NewSt3),
-    NewSt5 = copy_meta_data(NewSt4),
-    {ok, NewSt6} = couch_bt_engine:commit_data(NewSt5),
-    ok = couch_bt_engine:decref(NewSt6),
-    ok = couch_file:close(MFd),
-
-    % Done
-    gen_server:cast(Parent, {compact_done, couch_bt_engine, DName}).
-
-
-open_compaction_files(SrcHdr, DbFilePath, Options) ->
-    DataFile = DbFilePath ++ ".compact.data",
-    MetaFile = DbFilePath ++ ".compact.meta",
-    {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
-    {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
-    DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr),
-    case {DataHdr, MetaHdr} of
-        {#comp_header{}=A, #comp_header{}=A} ->
-            DbHeader = A#comp_header.db_header,
-            St0 = couch_bt_engine:init_state(
-                    DataFile, DataFd, DbHeader, Options),
-            St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_state),
-            {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
-        _ when DataHdrIsDbHdr ->
-            Header = couch_bt_engine_header:from(SrcHdr),
-            ok = reset_compaction_file(MetaFd, Header),
-            St0 = couch_bt_engine:init_state(
-                    DataFile, DataFd, DataHdr, Options),
-            St1 = bind_emsort(St0, MetaFd, nil),
-            {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
-        _ ->
-            Header = couch_bt_engine_header:from(SrcHdr),
-            ok = reset_compaction_file(DataFd, Header),
-            ok = reset_compaction_file(MetaFd, Header),
-            St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
-            St1 = bind_emsort(St0, MetaFd, nil),
-            {ok, St1, DataFile, DataFd, MetaFd, nil}
-    end.
-
-
-copy_purge_info(DbName, OldSt, NewSt, Retry) ->
-    MinPurgeSeq = couch_util:with_db(DbName, fun(Db) ->
-        couch_db:get_minimum_purge_seq(Db)
-    end),
-    OldPSTree = OldSt#st.purge_seq_tree,
-    StartSeq = couch_bt_engine:get_purge_seq(NewSt) + 1,
-    BufferSize = config:get_integer(
-            "database_compaction", "doc_buffer_size", 524288),
-    CheckpointAfter = config:get(
-            "database_compaction", "checkpoint_after", BufferSize * 10),
-
-    EnumFun = fun(Info, _Reds, {StAcc0, InfosAcc, InfosSize, CopiedSize}) ->
-        NewInfosSize = InfosSize + ?term_size(Info),
-        if NewInfosSize >= BufferSize ->
-            StAcc1 = copy_purge_infos(
-                    OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry),
-            NewCopiedSize = CopiedSize + NewInfosSize,
-            if NewCopiedSize >= CheckpointAfter ->
-                StAcc2 = commit_compaction_data(StAcc1),
-                {ok, {StAcc2, [], 0, 0}};
-            true ->
-                {ok, {StAcc1, [], 0, NewCopiedSize}}
-            end;
-        true ->
-            NewInfosAcc = [Info | InfosAcc],
-            {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}}
-        end
-    end,
-
-    InitAcc = {NewSt, [], 0, 0},
-    Opts = [{start_key, StartSeq}],
-    {ok, _, FinalAcc} = couch_btree:fold(OldPSTree, EnumFun, InitAcc, Opts),
-    {NewStAcc, Infos, _, _} = FinalAcc,
-    copy_purge_infos(OldSt, NewStAcc, Infos, MinPurgeSeq, Retry).
-
-
-copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
-    #st{
-        id_tree = OldIdTree
-    } = OldSt,
-
-    % Re-bind our id_tree to the backing btree
-    NewIdTreeState = couch_bt_engine_header:id_tree_state(NewSt0#st.header),
-    MetaFd = couch_emsort:get_fd(NewSt0#st.id_tree),
-    MetaState = couch_emsort:get_state(NewSt0#st.id_tree),
-    NewSt1 = bind_id_tree(NewSt0, NewSt0#st.fd, NewIdTreeState),
-
-    #st{
-        id_tree = NewIdTree0,
-        seq_tree = NewSeqTree0,
-        purge_tree = NewPurgeTree0,
-        purge_seq_tree = NewPurgeSeqTree0
-    } = NewSt1,
-
-    % Copy over the purge infos
-    InfosToAdd = lists:filter(fun({PSeq, _, _, _}) ->
-        PSeq > MinPurgeSeq
-    end, Infos),
-    {ok, NewPurgeTree1} = couch_btree:add(NewPurgeTree0, InfosToAdd),
-    {ok, NewPurgeSeqTree1} = couch_btree:add(NewPurgeSeqTree0, InfosToAdd),
-
-    NewSt2 = NewSt1#st{
-        purge_tree = NewPurgeTree1,
-        purge_seq_tree = NewPurgeSeqTree1
-    },
-
-    % If we're performing a retry compaction we have to check if
-    % any of the referenced docs have been completely purged
-    % from the database. Any doc that has been completely purged
-    % must then be removed from our partially compacted database.
-    NewSt3 = if Retry == nil -> NewSt2; true ->
-        AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos],
-        UniqDocIds = lists:usort(AllDocIds),
-        OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds),
-        OldZipped = lists:zip(UniqDocIds, OldIdResults),
-
-        % The list of non-existent docs in the database being compacted
-        MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped],
-
-        % Removing anything that exists in the partially compacted database
-        NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds),
-        ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}],
-
-        {RemIds, RemSeqs} = lists:unzip(lists:map(fun(FDI) ->
-            #full_doc_info{
-                id = Id,
-                update_seq = Seq
-            } = FDI,
-            {Id, Seq}
-        end, ToRemove)),
-
-        {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds),
-        {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs),
-
-        NewSt2#st{
-            id_tree = NewIdTree1,
-            seq_tree = NewSeqTree1
-        }
-    end,
-
-    Header = couch_bt_engine:update_header(NewSt3, NewSt3#st.header),
-    NewSt4 = NewSt3#st{
-        header = Header
-    },
-    bind_emsort(NewSt4, MetaFd, MetaState).
-
-
-copy_compact(DbName, St, NewSt0, Retry) ->
-    Compression = couch_compress:get_compression_method(),
-    NewSt = NewSt0#st{compression = Compression},
-    NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0),
-    TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq),
-    BufferSize = list_to_integer(
-        config:get("database_compaction", "doc_buffer_size", "524288")),
-    CheckpointAfter = couch_util:to_integer(
-        config:get("database_compaction", "checkpoint_after",
-            BufferSize * 10)),
-
-    EnumBySeqFun =
-    fun(DocInfo, _Offset,
-            {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
-
-        Seq = case DocInfo of
-            #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
-            #doc_info{} -> DocInfo#doc_info.high_seq
-        end,
-
-        AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
-        if AccUncopiedSize2 >= BufferSize ->
-            NewSt2 = copy_docs(
-                St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry),
-            AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
-            if AccCopiedSize2 >= CheckpointAfter ->
-                {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
-                CommNewSt3 = commit_compaction_data(NewSt3),
-                {ok, {CommNewSt3, [], 0, 0}};
-            true ->
-                {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
-                {ok, {NewSt3, [], 0, AccCopiedSize2}}
-            end;
-        true ->
-            {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2,
-                AccCopiedSize}}
-        end
-    end,
-
-    TaskProps0 = [
-        {type, database_compaction},
-        {database, DbName},
-        {progress, 0},
-        {changes_done, 0},
-        {total_changes, TotalChanges}
-    ],
-    case (Retry =/= nil) and couch_task_status:is_task_added() of
-    true ->
-        couch_task_status:update([
-            {retry, true},
-            {progress, 0},
-            {changes_done, 0},
-            {total_changes, TotalChanges}
-        ]);
-    false ->
-        couch_task_status:add_task(TaskProps0),
-        couch_task_status:set_update_frequency(500)
-    end,
-
-    {ok, _, {NewSt2, Uncopied, _, _}} =
-        couch_btree:foldl(St#st.seq_tree, EnumBySeqFun,
-            {NewSt, [], 0, 0},
-            [{start_key, NewUpdateSeq + 1}]),
-
-    NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry),
-
-    % Copy the security information over
-    SecProps = couch_bt_engine:get_security(St),
-    {ok, NewSt4} = couch_bt_engine:copy_security(NewSt3, SecProps),
-
-    % Copy general properties over
-    Props = couch_bt_engine:get_props(St),
-    {ok, NewSt5} = couch_bt_engine:set_props(NewSt4, Props),
-
-    FinalUpdateSeq = couch_bt_engine:get_update_seq(St),
-    {ok, NewSt6} = couch_bt_engine:set_update_seq(NewSt5, FinalUpdateSeq),
-    commit_compaction_data(NewSt6).
-
-
-copy_docs(St, #st{} = NewSt, MixedInfos, Retry) ->
-    DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
-    LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds),
-    % COUCHDB-968, make sure we prune duplicates during compaction
-    NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
-        A =< B
-    end, merge_lookups(MixedInfos, LookupResults)),
-
-    NewInfos1 = lists:map(fun(Info) ->
-        {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun
-            ({RevPos, RevId}, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) ->
-                {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
-                #size_info{external = OldExternalSize} = Leaf#leaf.sizes,
-                ExternalSize = case OldExternalSize of
-                    0 when is_binary(Body) ->
-                        couch_compress:uncompressed_size(Body);
-                    0 ->
-                        couch_ejson_size:encoded_size(Body);
-                    N -> N
-                end,
-                Doc0 = #doc{
-                    id = Info#full_doc_info.id,
-                    revs = {RevPos, [RevId]},
-                    deleted = Leaf#leaf.deleted,
-                    body = Body,
-                    atts = AttInfos
-                },
-                Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
-                {ok, Doc2, ActiveSize} =
-                        couch_bt_engine:write_doc_body(NewSt, Doc1),
-                AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos],
-                NewLeaf = Leaf#leaf{
-                    ptr = Doc2#doc.body,
-                    sizes = #size_info{
-                        active = ActiveSize,
-                        external = ExternalSize
-                    },
-                    atts = AttSizes
-                },
-                {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
-            (_Rev, _Leaf, branch, SizesAcc) ->
-                {?REV_MISSING, SizesAcc}
-        end, {0, 0, []}, Info#full_doc_info.rev_tree),
-        {FinalAS, FinalES, FinalAtts} = FinalAcc,
-        TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
-        NewActiveSize = FinalAS + TotalAttSize,
-        NewExternalSize = FinalES + TotalAttSize,
-        Info#full_doc_info{
-            rev_tree = NewRevTree,
-            sizes = #size_info{
-                active = NewActiveSize,
-                external = NewExternalSize
-            }
-        }
-    end, NewInfos0),
-
-    Limit = couch_bt_engine:get_revs_limit(St),
-    NewInfos = lists:map(fun(FDI) ->
-        FDI#full_doc_info{
-            rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
-        }
-    end, NewInfos1),
-
-    RemoveSeqs =
-    case Retry of
-    nil ->
-        [];
-    OldDocIdTree ->
-        % Compaction is being rerun to catch up to writes during the
-        % first pass. This means we may have docs that already exist
-        % in the seq_tree in the .data file. Here we lookup any old
-        % update_seqs so that they can be removed.
-        Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
-        Existing = couch_btree:lookup(OldDocIdTree, Ids),
-        [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
-    end,
-
-    {ok, SeqTree} = couch_btree:add_remove(
-            NewSt#st.seq_tree, NewInfos, RemoveSeqs),
-
-    FDIKVs = lists:map(fun(#full_doc_info{id=Id, update_seq=Seq}=FDI) ->
-        {{Id, Seq}, FDI}
-    end, NewInfos),
-    {ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, FDIKVs),
-    update_compact_task(length(NewInfos)),
-    NewSt#st{id_tree=IdEms, seq_tree=SeqTree}.
-
-
-copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) ->
-    {ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp),
-    BinInfos = case BinInfos0 of
-    _ when is_binary(BinInfos0) ->
-        couch_compress:decompress(BinInfos0);
-    _ when is_list(BinInfos0) ->
-        % pre 1.2 file format
-        BinInfos0
-    end,
-    % copy the bin values
-    NewBinInfos = lists:map(
-        fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
-            % 010 UPGRADE CODE
-            {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
-            {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
-            ok = couch_stream:copy(SrcStream, DstStream),
-            {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
-                couch_stream:close(DstStream),
-            {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-            couch_util:check_md5(ExpectedMd5, ActualMd5),
-            {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-        ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
-            {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
-            {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
-            ok = couch_stream:copy(SrcStream, DstStream),
-            {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
-                couch_stream:close(DstStream),
-            {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-            couch_util:check_md5(ExpectedMd5, ActualMd5),
-            Enc = case Enc1 of
-            true ->
-                % 0110 UPGRADE CODE
-                gzip;
-            false ->
-                % 0110 UPGRADE CODE
-                identity;
-            _ ->
-                Enc1
-            end,
-            {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
-        end, BinInfos),
-    {BodyData, NewBinInfos}.
-
-
-sort_meta_data(St0) ->
-    {ok, Ems} = couch_emsort:merge(St0#st.id_tree),
-    St0#st{id_tree=Ems}.
-
-
-copy_meta_data(#st{} = St) ->
-    #st{
-        fd = Fd,
-        header = Header,
-        id_tree = Src
-    } = St,
-    DstState = couch_bt_engine_header:id_tree_state(Header),
-    {ok, IdTree0} = couch_btree:open(DstState, Fd, [
-        {split, fun couch_bt_engine:id_tree_split/1},
-        {join, fun couch_bt_engine:id_tree_join/2},
-        {reduce, fun couch_bt_engine:id_tree_reduce/2}
-    ]),
-    {ok, Iter} = couch_emsort:iter(Src),
-    Acc0 = #merge_st{
-        id_tree=IdTree0,
-        seq_tree=St#st.seq_tree,
-        rem_seqs=[],
-        infos=[]
-    },
-    Acc = merge_docids(Iter, Acc0),
-    {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Acc#merge_st.infos),
-    {ok, SeqTree} = couch_btree:add_remove(
-        Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
-    ),
-    St#st{id_tree=IdTree, seq_tree=SeqTree}.
-
-
-open_compaction_file(FilePath) ->
-    case couch_file:open(FilePath, [nologifmissing]) of
-        {ok, Fd} ->
-            case couch_file:read_header(Fd) of
-                {ok, Header} -> {ok, Fd, Header};
-                no_valid_header -> {ok, Fd, nil}
-            end;
-        {error, enoent} ->
-            {ok, Fd} = couch_file:open(FilePath, [create]),
-            {ok, Fd, nil}
-    end.
-
-
-reset_compaction_file(Fd, Header) ->
-    ok = couch_file:truncate(Fd, 0),
-    ok = couch_file:write_header(Fd, Header).
-
-
-commit_compaction_data(#st{}=St) ->
-    % Compaction needs to write headers to both the data file
-    % and the meta file so if we need to restart we can pick
-    % back up from where we left off.
-    commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)),
-    commit_compaction_data(St, St#st.fd).
-
-
-commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
-    DataState = couch_bt_engine_header:id_tree_state(OldHeader),
-    MetaFd = couch_emsort:get_fd(St0#st.id_tree),
-    MetaState = couch_emsort:get_state(St0#st.id_tree),
-    St1 = bind_id_tree(St0, St0#st.fd, DataState),
-    Header = couch_bt_engine:update_header(St1, St1#st.header),
-    CompHeader = #comp_header{
-        db_header = Header,
-        meta_state = MetaState
-    },
-    ok = couch_file:sync(Fd),
-    ok = couch_file:write_header(Fd, CompHeader),
-    St2 = St1#st{
-        header = Header
-    },
-    bind_emsort(St2, MetaFd, MetaState).
-
-
-bind_emsort(St, Fd, nil) ->
-    {ok, Ems} = couch_emsort:open(Fd),
-    St#st{id_tree=Ems};
-bind_emsort(St, Fd, State) ->
-    {ok, Ems} = couch_emsort:open(Fd, [{root, State}]),
-    St#st{id_tree=Ems}.
-
-
-bind_id_tree(St, Fd, State) ->
-    {ok, IdBtree} = couch_btree:open(State, Fd, [
-        {split, fun couch_bt_engine:id_tree_split/1},
-        {join, fun couch_bt_engine:id_tree_join/2},
-        {reduce, fun couch_bt_engine:id_tree_reduce/2}
-    ]),
-    St#st{id_tree=IdBtree}.
-
-
-merge_lookups(Infos, []) ->
-    Infos;
-merge_lookups([], _) ->
-    [];
-merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
-    % Assert we've matched our lookups
-    if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
-        erlang:error({mismatched_doc_infos, DI#doc_info.id})
-    end,
-    [FDI | merge_lookups(RestInfos, RestLookups)];
-merge_lookups([FDI | RestInfos], Lookups) ->
-    [FDI | merge_lookups(RestInfos, Lookups)].
-
-
-merge_docids(Iter, #merge_st{infos=Infos}=Acc) when length(Infos) > 1000 ->
-    #merge_st{
-        id_tree=IdTree0,
-        seq_tree=SeqTree0,
-        rem_seqs=RemSeqs
-    } = Acc,
-    {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
-    {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
-    Acc1 = Acc#merge_st{
-        id_tree=IdTree1,
-        seq_tree=SeqTree1,
-        rem_seqs=[],
-        infos=[]
-    },
-    merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
-    case next_info(Iter, Curr, []) of
-        {NextIter, NewCurr, FDI, Seqs} ->
-            Acc1 = Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = NewCurr
-            },
-            merge_docids(NextIter, Acc1);
-        {finished, FDI, Seqs} ->
-            Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = undefined
-            };
-        empty ->
-            Acc
-    end.
-
-
-next_info(Iter, undefined, []) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, Seq}, FDI}, NextIter} ->
-            next_info(NextIter, {Id, Seq, FDI}, []);
-        finished ->
-            empty
-    end;
-next_info(Iter, {Id, Seq, FDI}, Seqs) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, NSeq}, NFDI}, NextIter} ->
-            next_info(NextIter, {Id, NSeq, NFDI}, [Seq | Seqs]);
-        {ok, {{NId, NSeq}, NFDI}, NextIter} ->
-            {NextIter, {NId, NSeq, NFDI}, FDI, Seqs};
-        finished ->
-            {finished, FDI, Seqs}
-    end.
-
-
-update_compact_task(NumChanges) ->
-    [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
-    Changes2 = Changes + NumChanges,
-    Progress = case Total of
-    0 ->
-        0;
-    _ ->
-        (Changes2 * 100) div Total
-    end,
-    couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
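copy_meta_data/1 above rebuilds the id tree from couch_emsort, whose keys are {DocId, Seq} pairs, so all copies of a document arrive consecutively in id order with the newest update_seq last. next_info/3 collapses each run to its latest #full_doc_info{} and collects the superseded seqs for removal from the seq tree. A rough standalone sketch of that grouping, using a plain sorted list instead of couch_emsort and invented names:

    -module(merge_docids_sketch).
    -export([demo/0]).

    group([], Acc) ->
        lists:reverse(Acc);
    group([{{Id, Seq}, FDI} | Rest], Acc) ->
        group(Rest, Id, Seq, FDI, [], Acc).

    group([{{Id, NSeq}, NFDI} | Rest], Id, Seq, _FDI, Seqs, Acc) ->
        %% Same doc id: keep the newer copy, remember the stale update seq.
        group(Rest, Id, NSeq, NFDI, [Seq | Seqs], Acc);
    group(Rest, _Id, Seq, FDI, Seqs, Acc) ->
        %% Different doc id (or end of input): emit the finished group.
        group(Rest, [{FDI, Seq, lists:reverse(Seqs)} | Acc]).

    demo() ->
        Sorted = [
            {{<<"a">>, 1}, a1}, {{<<"a">>, 4}, a4},
            {{<<"b">>, 2}, b2},
            {{<<"c">>, 3}, c3}, {{<<"c">>, 5}, c5}, {{<<"c">>, 6}, c6}
        ],
        group(Sorted, []).
        %% => [{a4,4,[1]}, {b2,2,[]}, {c6,6,[3,5]}]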
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
deleted file mode 100644
index 3f9f518..0000000
--- a/src/couch/src/couch_bt_engine_header.erl
+++ /dev/null
@@ -1,451 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_header).
-
-
--export([
-    new/0,
-    from/1,
-    is_header/1,
-    upgrade/1,
-    get/2,
-    get/3,
-    set/2,
-    set/3
-]).
-
--export([
-    disk_version/1,
-    latest_disk_version/0,
-    update_seq/1,
-    id_tree_state/1,
-    seq_tree_state/1,
-    latest/1,
-    local_tree_state/1,
-    purge_tree_state/1,
-    purge_seq_tree_state/1,
-    purge_infos_limit/1,
-    security_ptr/1,
-    revs_limit/1,
-    uuid/1,
-    epochs/1,
-    compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 8).
-
--record(db_header, {
-    disk_version = ?LATEST_DISK_VERSION,
-    update_seq = 0,
-    unused = 0,
-    id_tree_state = nil,
-    seq_tree_state = nil,
-    local_tree_state = nil,
-    purge_tree_state = nil,
-    purge_seq_tree_state = nil, %purge tree: purge_seq -> uuid
-    security_ptr = nil,
-    revs_limit = 1000,
-    uuid,
-    epochs,
-    compacted_seq,
-    purge_infos_limit = 1000,
-    props_ptr
-}).
-
-
--define(PARTITION_DISK_VERSION, 8).
-
-
-new() ->
-    #db_header{
-        uuid = couch_uuids:random(),
-        epochs = [{node(), 0}]
-    }.
-
-
-from(Header0) ->
-    Header = upgrade(Header0),
-    #db_header{
-        uuid = Header#db_header.uuid,
-        epochs = Header#db_header.epochs,
-        compacted_seq = Header#db_header.compacted_seq
-    }.
-
-
-is_header(Header) ->
-    try
-        upgrade(Header),
-        true
-    catch _:_ ->
-        false
-    end.
-
-
-upgrade(Header) ->
-    Funs = [
-        fun upgrade_tuple/1,
-        fun upgrade_disk_version/1,
-        fun upgrade_uuid/1,
-        fun upgrade_epochs/1,
-        fun upgrade_compacted_seq/1
-    ],
-    lists:foldl(fun(F, HdrAcc) ->
-        F(HdrAcc)
-    end, Header, Funs).
-
-
-get(Header, Key) ->
-    ?MODULE:get(Header, Key, undefined).
-
-
-get(Header, Key, Default) ->
-    get_field(Header, Key, Default).
-
-
-set(Header, Key, Value) ->
-    ?MODULE:set(Header, [{Key, Value}]).
-
-
-set(Header0, Fields) ->
-    % A subtlety here is that if a database was open during
-    % the release upgrade that adds uuids and epochs, then
-    % this dynamic upgrade also assigns a uuid and epoch.
-    Header = upgrade(Header0),
-    lists:foldl(fun({Field, Value}, HdrAcc) ->
-        set_field(HdrAcc, Field, Value)
-    end, Header, Fields).
-
-
-disk_version(Header) ->
-    get_field(Header, disk_version).
-
-
-latest_disk_version() ->
-    ?LATEST_DISK_VERSION.
-
-
-update_seq(Header) ->
-    get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
-    get_field(Header, id_tree_state).
-
-
-seq_tree_state(Header) ->
-    get_field(Header, seq_tree_state).
-
-
-local_tree_state(Header) ->
-    get_field(Header, local_tree_state).
-
-
-purge_tree_state(Header) ->
-    get_field(Header, purge_tree_state).
-
-
-purge_seq_tree_state(Header) ->
-    get_field(Header, purge_seq_tree_state).
-
-
-security_ptr(Header) ->
-    get_field(Header, security_ptr).
-
-
-revs_limit(Header) ->
-    get_field(Header, revs_limit).
-
-
-uuid(Header) ->
-    get_field(Header, uuid).
-
-
-epochs(Header) ->
-    get_field(Header, epochs).
-
-
-compacted_seq(Header) ->
-    get_field(Header, compacted_seq).
-
-
-purge_infos_limit(Header) ->
-    get_field(Header, purge_infos_limit).
-
-
-get_field(Header, Field) ->
-    get_field(Header, Field, undefined).
-
-
-get_field(Header, Field, Default) ->
-    Idx = index(Field),
-    case Idx > tuple_size(Header) of
-        true -> Default;
-        false -> element(index(Field), Header)
-    end.
-
-
-set_field(Header, Field, Value) ->
-    setelement(index(Field), Header, Value).
-
-
-index(Field) ->
-    couch_util:get_value(Field, indexes()).
-
-
-indexes() ->
-    Fields = record_info(fields, db_header),
-    Indexes = lists:seq(2, record_info(size, db_header)),
-    lists:zip(Fields, Indexes).
-
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
-    Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
-    NewSize = record_info(size, db_header),
-    if tuple_size(Old) < NewSize -> ok; true ->
-        erlang:error({invalid_header_size, Old})
-    end,
-    {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
-        {Idx+1, setelement(Idx, Hdr, Val)}
-    end, {1, #db_header{}}, tuple_to_list(Old)),
-    if is_record(New, db_header) -> ok; true ->
-        erlang:error({invalid_header_extension, {Old, New}})
-    end,
-    New.
-
--define(OLD_DISK_VERSION_ERROR,
-    "Database files from versions smaller than 0.10.0 are no longer supported").
-
-upgrade_disk_version(#db_header{}=Header) ->
-    case element(2, Header) of
-        1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
-        5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2
-        6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge
-        7 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre partitioned dbs
-        ?LATEST_DISK_VERSION -> Header;
-        _ ->
-            Reason = "Incorrect disk header version",
-            throw({database_disk_version_error, Reason})
-    end.
-
-
-upgrade_uuid(#db_header{}=Header) ->
-    case Header#db_header.uuid of
-        undefined ->
-            % Upgrading this old db file to a newer
-            % on-disk format that includes a UUID.
-            Header#db_header{uuid=couch_uuids:random()};
-        _ ->
-            Header
-    end.
-
-
-upgrade_epochs(#db_header{}=Header) ->
-    NewEpochs = case Header#db_header.epochs of
-        undefined ->
-            % This node is taking over ownership of a shard with
-            % an old version of the couch file. Before epochs there
-            % was always an implicit assumption that a file was
-            % owned since eternity by the node it was on. This
-            % just codifies that assumption.
-            [{node(), 0}];
-        [{Node, _} | _] = Epochs0 when Node == node() ->
-            % Current node is the current owner of this db
-            Epochs0;
-        Epochs1 ->
-            % This node is taking over ownership of this db
-            % and marking the update sequence where it happened.
-            [{node(), Header#db_header.update_seq} | Epochs1]
-    end,
-    % It's possible for a node to open a db and claim
-    % ownership but never make a write to the db. This
-    % removes nodes that claimed ownership but never
-    % changed the database.
-    DedupedEpochs = remove_dup_epochs(NewEpochs),
-    Header#db_header{epochs=DedupedEpochs}.
-
-
-% This is slightly relying on the update_seqs being sorted
-% in epochs due to how we only ever push things onto the
-% front. Although if we ever had a case where the update_seq
-% is not monotonically increasing I don't know that we'd
-% want to remove dupes (by calling a sort on the input to this
-% function). So for now we don't sort but are relying on the
-% idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
-    % Seqs match, keep the most recent owner
-    [{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
-    % Seqs don't match.
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
-    % Seqs match, keep the most recent owner
-    remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
-    % Seqs don't match, recurse to check others
-    [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
-
-upgrade_compacted_seq(#db_header{}=Header) ->
-    case Header#db_header.compacted_seq of
-        undefined ->
-            Header#db_header{compacted_seq=0};
-        _ ->
-            Header
-    end.
-
-latest(?LATEST_DISK_VERSION) ->
-    true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
-    false;
-latest(_Else) ->
-    undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
-    {
-        db_header, % record name
-        Vsn, % disk version
-        100, % update_seq
-        0, % unused
-        foo, % id_tree_state
-        bar, % seq_tree_state
-        bam, % local_tree_state
-        flam, % was purge_seq - now purge_tree_state
-        baz, % was purged_docs - now purge_seq_tree_state
-        bang, % security_ptr
-        999 % revs_limit
-    }.
-
-
--ifdef(run_broken_tests).
-
-upgrade_v3_test() ->
-    Vsn3Header = mk_header(3),
-    NewHeader = upgrade_tuple(Vsn3Header),
-
-    % Tuple upgrades don't change any field values
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(3, disk_version(NewHeader)),
-    ?assertEqual(100, update_seq(NewHeader)),
-    ?assertEqual(foo, id_tree_state(NewHeader)),
-    ?assertEqual(bar, seq_tree_state(NewHeader)),
-    ?assertEqual(bam, local_tree_state(NewHeader)),
-    ?assertEqual(flam, purge_tree_state(NewHeader)),
-    ?assertEqual(baz, purge_seq_tree_state(NewHeader)),
-    ?assertEqual(bang, security_ptr(NewHeader)),
-    ?assertEqual(999, revs_limit(NewHeader)),
-    ?assertEqual(undefined, uuid(NewHeader)),
-    ?assertEqual(undefined, epochs(NewHeader)),
-
-    % Security ptr isn't changed until upgrade_disk_version/1
-    NewNewHeader = upgrade_disk_version(NewHeader),
-    ?assert(is_record(NewNewHeader, db_header)),
-    ?assertEqual(nil, security_ptr(NewNewHeader)),
-
-    % Assert upgrade works on really old headers
-    NewestHeader = upgrade(Vsn3Header),
-    ?assertMatch(<<_:32/binary>>, uuid(NewestHeader)),
-    ?assertEqual([{node(), 0}], epochs(NewestHeader)).
-
--endif.
-
-upgrade_v5_to_v8_test() ->
-    Vsn5Header = mk_header(5),
-    NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(8, disk_version(NewHeader)),
-
-    % Security ptr isn't changed for v5 headers
-    ?assertEqual(bang, security_ptr(NewHeader)).
-
-
-upgrade_uuid_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a new UUID
-    NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
-    ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
-    % Headers with a UUID don't have their UUID changed
-    NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
-    ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
-    % Derived empty headers maintain the same UUID
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-
-upgrade_epochs_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a default epochs set
-    NewHeader = upgrade(Vsn5Header),
-    ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
-    % Fake an old entry in epochs
-    FakeFields = [
-        {update_seq, 20},
-        {epochs, [{'someothernode@someotherhost', 0}]}
-    ],
-    NotOwnedHeader = set(NewHeader, FakeFields),
-
-    OwnedEpochs = [
-        {node(), 20},
-        {'someothernode@someotherhost', 0}
-    ],
-
-    % Upgrading a header not owned by the local node updates
-    % the epochs appropriately.
-    NowOwnedHeader = upgrade(NotOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
-    % Headers with epochs stay the same after upgrades
-    NewNewHeader = upgrade(NowOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
-    % Getting a reset header maintains the epoch data
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-
-get_uuid_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, uuid(Vsn5Header)).
-
-
-get_epochs_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, epochs(Vsn5Header)).
-
-
--endif.
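All of the field accessors in the module deleted above reduce to one positional lookup over the #db_header{} record. A minimal standalone sketch of that mechanism, using a trimmed-down record purely for illustration:

    -module(header_field_sketch).
    -export([get_field/3]).

    % Trimmed-down stand-in for the deleted #db_header{} record.
    -record(db_header, {disk_version = 8, update_seq = 0, uuid, epochs}).

    % Fields map to tuple positions (element 1 is the record tag). A header
    % tuple written by an older release that is shorter than the current
    % record simply yields the default instead of crashing.
    get_field(Header, Field, Default) ->
        Fields = record_info(fields, db_header),
        Indexes = lists:seq(2, record_info(size, db_header)),
        case proplists:get_value(Field, lists:zip(Fields, Indexes)) of
            Idx when is_integer(Idx), Idx =< tuple_size(Header) ->
                element(Idx, Header);
            _ ->
                Default
        end.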
diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl
deleted file mode 100644
index 431894a..0000000
--- a/src/couch/src/couch_bt_engine_stream.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_stream).
-
--export([
-    foldl/3,
-    seek/2,
-    write/2,
-    finalize/1,
-    to_disk_term/1
-]).
-
-
-foldl({_Fd, []}, _Fun, Acc) ->
-    Acc;
-
-foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) ->
-    foldl({Fd, [Pos | Rest]}, Fun, Acc);
-
-foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) ->
-    % We're processing the first bit of data
-    % after we did a seek for a range fold.
-    foldl({Fd, Rest}, Fun, Fun(Bin, Acc));
-
-foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) ->
-    {ok, Bin} = couch_file:pread_binary(Fd, Pos),
-    foldl({Fd, Rest}, Fun, Fun(Bin, Acc)).
-
-
-seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
-    case Length =< Offset of
-        true ->
-            seek({Fd, Rest}, Offset - Length);
-        false ->
-            seek({Fd, [Pos | Rest]}, Offset)
-    end;
-
-seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
-    {ok, Bin} = couch_file:pread_binary(Fd, Pos),
-    case iolist_size(Bin) =< Offset of
-        true ->
-            seek({Fd, Rest}, Offset - size(Bin));
-        false ->
-            <<_:Offset/binary, Tail/binary>> = Bin,
-            {ok, {Fd, [Tail | Rest]}}
-    end.
-
-
-write({Fd, Written}, Data) when is_pid(Fd) ->
-    {ok, Pos, _} = couch_file:append_binary(Fd, Data),
-    {ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}.
-
-
-finalize({Fd, Written}) ->
-    {ok, {Fd, lists:reverse(Written)}}.
-
-
-to_disk_term({_Fd, Written}) ->
-    {ok, Written}.
-
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
deleted file mode 100644
index ea0cf69..0000000
--- a/src/couch/src/couch_btree.erl
+++ /dev/null
@@ -1,855 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree).
-
--export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
--export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
--export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
--export([extract/2, assemble/3, less/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(FILL_RATIO, 0.5).
-
-extract(#btree{extract_kv=undefined}, Value) ->
-    Value;
-extract(#btree{extract_kv=Extract}, Value) ->
-    Extract(Value).
-
-assemble(#btree{assemble_kv=undefined}, Key, Value) ->
-    {Key, Value};
-assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
-    Assemble(Key, Value).
-
-less(#btree{less=undefined}, A, B) ->
-    A < B;
-less(#btree{less=Less}, A, B) ->
-    Less(A, B).
-
-% pass in 'nil' for State when opening a new Btree.
-open(State, Fd) ->
-    {ok, #btree{root=State, fd=Fd}}.
-
-set_options(Bt, []) ->
-    Bt;
-set_options(Bt, [{split, Extract}|Rest]) ->
-    set_options(Bt#btree{extract_kv=Extract}, Rest);
-set_options(Bt, [{join, Assemble}|Rest]) ->
-    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
-set_options(Bt, [{less, Less}|Rest]) ->
-    set_options(Bt#btree{less=Less}, Rest);
-set_options(Bt, [{reduce, Reduce}|Rest]) ->
-    set_options(Bt#btree{reduce=Reduce}, Rest);
-set_options(Bt, [{compression, Comp}|Rest]) ->
-    set_options(Bt#btree{compression=Comp}, Rest).
-
-open(State, Fd, Options) ->
-    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
-
-get_state(#btree{root=Root}) ->
-    Root.
-
-final_reduce(#btree{reduce=Reduce}, Val) ->
-    final_reduce(Reduce, Val);
-final_reduce(Reduce, {[], []}) ->
-    Reduce(reduce, []);
-final_reduce(_Bt, {[], [Red]}) ->
-    Red;
-final_reduce(Reduce, {[], Reductions}) ->
-    Reduce(rereduce, Reductions);
-final_reduce(Reduce, {KVs, Reductions}) ->
-    Red = Reduce(reduce, KVs),
-    final_reduce(Reduce, {[], [Red | Reductions]}).
-
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    StartKey = couch_util:get_value(start_key, Options),
-    InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
-    KeyGroupFun = get_group_fun(Bt, Options),
-    try
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
-            KeyGroupFun, Fun, Acc),
-        if GroupedKey2 == undefined ->
-            {ok, Acc2};
-        true ->
-            case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
-            {ok, Acc3} -> {ok, Acc3};
-            {stop, Acc3} -> {ok, Acc3}
-            end
-        end
-    catch
-        throw:{stop, AccDone} -> {ok, AccDone}
-    end.
-
-full_reduce(#btree{root=nil,reduce=Reduce}) ->
-    {ok, Reduce(reduce, [])};
-full_reduce(#btree{root=Root}) ->
-    {ok, element(2, Root)}.
-
-size(#btree{root = nil}) ->
-    0;
-size(#btree{root = {_P, _Red}}) ->
-    % pre 1.2 format
-    nil;
-size(#btree{root = {_P, _Red, Size}}) ->
-    Size.
-
-get_group_fun(Bt, Options) ->
-    case couch_util:get_value(key_group_level, Options) of
-        exact ->
-            make_group_fun(Bt, exact);
-        0 ->
-            fun(_, _) -> true end;
-        N when is_integer(N), N > 0 ->
-            make_group_fun(Bt, N);
-        undefined ->
-            couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end)
-    end.
-
-make_group_fun(Bt, exact) ->
-    fun({Key1, _}, {Key2, _}) ->
-        case less(Bt, {Key1, nil}, {Key2, nil}) of
-            false ->
-                case less(Bt, {Key2, nil}, {Key1, nil}) of
-                    false ->
-                        true;
-                    _ ->
-                        false
-                end;
-            _ ->
-                false
-        end
-    end;
-make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 ->
-    fun
-        GF({{p, Partition, Key1}, Val1}, {{p, Partition, Key2}, Val2}) ->
-            GF({Key1, Val1}, {Key2, Val2});
-        GF({[_|_] = Key1, _}, {[_|_] = Key2, _}) ->
-            SL1 = lists:sublist(Key1, GroupLevel),
-            SL2 = lists:sublist(Key2, GroupLevel),
-            case less(Bt, {SL1, nil}, {SL2, nil}) of
-                false ->
-                    case less(Bt, {SL2, nil}, {SL1, nil}) of
-                        false ->
-                            true;
-                        _ ->
-                            false
-                    end;
-                _ ->
-                    false
-            end;
-        GF({Key1, _}, {Key2, _}) ->
-            case less(Bt, {Key1, nil}, {Key2, nil}) of
-                false ->
-                    case less(Bt, {Key2, nil}, {Key1, nil}) of
-                        false ->
-                            true;
-                        _ ->
-                            false
-                    end;
-                _ ->
-                    false
-            end
-    end.
-
-% wraps a 2 or 3 arity function with the proper 4 arity function
-convert_fun_arity(Fun) when is_function(Fun, 2) ->
-    fun
-        (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 3) ->
-    fun
-        (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 4) ->
-    Fun.    % Already arity 4
-
-make_key_in_end_range_function(Bt, fwd, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, LastKey, Key) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, Key, EndKey) end
-    end;
-make_key_in_end_range_function(Bt, rev, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, Key, LastKey) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, EndKey, Key) end
-    end.
-
-
-foldl(Bt, Fun, Acc) ->
-    fold(Bt, Fun, Acc, []).
-
-foldl(Bt, Fun, Acc, Options) ->
-    fold(Bt, Fun, Acc, Options).
-
-
-fold(#btree{root=nil}, _Fun, Acc, _Options) ->
-    {ok, {[], []}, Acc};
-fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    InRange = make_key_in_end_range_function(Bt, Dir, Options),
-    Result =
-    case couch_util:get_value(start_key, Options) of
-    undefined ->
-        stream_node(Bt, [], Bt#btree.root, InRange, Dir,
-                convert_fun_arity(Fun), Acc);
-    StartKey ->
-        stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
-                convert_fun_arity(Fun), Acc)
-    end,
-    case Result of
-    {ok, Acc2}->
-        FullReduction = element(2, Root),
-        {ok, {[], [FullReduction]}, Acc2};
-    {stop, LastReduction, Acc2} ->
-        {ok, LastReduction, Acc2}
-    end.
-
-add(Bt, InsertKeyValues) ->
-    add_remove(Bt, InsertKeyValues, []).
-
-add_remove(Bt, InsertKeyValues, RemoveKeys) ->
-    {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
-    {ok, Bt2}.
-
-query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
-    #btree{root=Root} = Bt,
-    InsertActions = lists:map(
-        fun(KeyValue) ->
-            {Key, Value} = extract(Bt, KeyValue),
-            {insert, Key, Value}
-        end, InsertValues),
-    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
-    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
-    SortFun =
-        fun({OpA, A, _}, {OpB, B, _}) ->
-            case A == B of
-            % A and B are equal, sort by op.
-            true -> op_order(OpA) < op_order(OpB);
-            false ->
-                less(Bt, A, B)
-            end
-        end,
-    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
-    {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
-    {ok, NewRoot} = complete_root(Bt, KeyPointers),
-    {ok, QueryResults, Bt#btree{root=NewRoot}}.
-
-% for ordering different operations with the same key.
-% fetch < remove < insert
-op_order(fetch) -> 1;
-op_order(remove) -> 2;
-op_order(insert) -> 3.
-
-lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
-    SortedKeys = case Less of
-        undefined -> lists:sort(Keys);
-        _ -> lists:sort(Less, Keys)
-    end,
-    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
-    % We want to return the results in the same order as the keys were input
-    % but we may have changed the order when we sorted. So we need to put the
-    % order back into the results.
-    couch_util:reorder_results(Keys, SortedResults).
-
-lookup(_Bt, nil, Keys) ->
-    {ok, [{Key, not_found} || Key <- Keys]};
-lookup(Bt, Node, Keys) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
-    kv_node ->
-        lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
-    end.
-
-lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
-    {Key, PointerInfo} = element(N, NodeTuple),
-    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
-    case lists:splitwith(SplitFun, LookupKeys) of
-    {[], GreaterQueries} ->
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
-    {LessEqQueries, GreaterQueries} ->
-        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
-    end.
-
-
-lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    % keys not found
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
-    {Key, Value} = element(N, NodeTuple),
-    case less(Bt, LookupKey, Key) of
-    true ->
-        % LookupKey is less than Key
-        lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
-    false ->
-        case less(Bt, Key, LookupKey) of
-        true ->
-            % LookupKey is greater than Key
-            lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
-        false ->
-            % LookupKey is equal to Key
-            lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
-        end
-    end.
-
-
-complete_root(_Bt, []) ->
-    {ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}])->
-    {ok, PointerInfo};
-complete_root(Bt, KPs) ->
-    {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
-    complete_root(Bt, ResultKeyPointers).
-
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
-% It is inaccurate as it does not account for compression when blocks are
-% written. Plus with the "case byte_size(term_to_binary(InList)) of" code
-% it's probably really inefficient.
-
-chunkify(InList) ->
-    BaseChunkSize = get_chunk_size(),
-    case ?term_size(InList) of
-    Size when Size > BaseChunkSize ->
-        NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
-        ChunkThreshold = Size div NumberOfChunksLikely,
-        chunkify(InList, ChunkThreshold, [], 0, []);
-    _Else ->
-        [InList]
-    end.
-
-chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
-    lists:reverse(OutputChunks);
-chunkify([], _ChunkThreshold, [Item], _OutListSize, [PrevChunk | RestChunks]) ->
-    NewPrevChunk = PrevChunk ++ [Item],
-    lists:reverse(RestChunks, [NewPrevChunk]);
-chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
-    lists:reverse([lists:reverse(OutList) | OutputChunks]);
-chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
-    case ?term_size(InElement) of
-    Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
-        chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
-    Size ->
-        chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
-    end.
-
--compile({inline,[get_chunk_size/0]}).
-get_chunk_size() ->
-    try
-        list_to_integer(config:get("couchdb", "btree_chunk_size", "1279"))
-    catch error:badarg ->
-        1279
-    end.
-
-modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
-    {NodeType, NodeList} = case RootPointerInfo of
-    nil ->
-        {kv_node, []};
-    _Tuple ->
-        Pointer = element(1, RootPointerInfo),
-        get_node(Bt, Pointer)
-    end,
-    NodeTuple = list_to_tuple(NodeList),
-
-    {ok, NewNodeList, QueryOutput2} =
-    case NodeType of
-    kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
-    kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
-    end,
-    case NewNodeList of
-    [] ->  % no nodes remain
-        {ok, [], QueryOutput2};
-    NodeList ->  % nothing changed
-        {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-        {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
-    _Else2 ->
-        {ok, ResultList} = case RootPointerInfo of
-        nil ->
-            write_node(Bt, NodeType, NewNodeList);
-        _ ->
-            {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-            OldNode = {LastKey, RootPointerInfo},
-            write_node(Bt, OldNode, NodeType, NodeList, NewNodeList)
-        end,
-        {ok, ResultList, QueryOutput2}
-    end.
-
-reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
-    [];
-reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
-    R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
-    R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
-
-reduce_tree_size(kv_node, NodeSize, _KvList) ->
-    NodeSize;
-reduce_tree_size(kp_node, NodeSize, []) ->
-    NodeSize;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
-    % pre 1.2 format
-    nil;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
-    nil;
-reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
-    reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
-
-get_node(#btree{fd = Fd}, NodePos) ->
-    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
-    {NodeType, NodeList}.
-
-write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
-    % split up nodes into smaller sizes
-    NodeListList = chunkify(NodeList),
-    % now write out each chunk and return the KeyPointer pairs for those nodes
-    ResultList = [
-        begin
-            {ok, Pointer, Size} = couch_file:append_term(
-                Fd, {NodeType, ANodeList}, [{compression, Comp}]),
-            {LastKey, _} = lists:last(ANodeList),
-            SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
-            {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
-        end
-    ||
-        ANodeList <- NodeListList
-    ],
-    {ok, ResultList}.
-
-
-write_node(Bt, _OldNode, NodeType, [], NewList) ->
-    write_node(Bt, NodeType, NewList);
-write_node(Bt, _OldNode, NodeType, [_], NewList) ->
-    write_node(Bt, NodeType, NewList);
-write_node(Bt, OldNode, NodeType, OldList, NewList) ->
-    case can_reuse_old_node(OldList, NewList) of
-        {true, Prefix, Suffix} ->
-            {ok, PrefixKVs} = case Prefix of
-                [] -> {ok, []};
-                _ -> write_node(Bt, NodeType, Prefix)
-            end,
-            {ok, SuffixKVs} = case Suffix of
-                [] -> {ok, []};
-                _ -> write_node(Bt, NodeType, Suffix)
-            end,
-            Result = PrefixKVs ++ [OldNode] ++ SuffixKVs,
-            {ok, Result};
-        false ->
-            write_node(Bt, NodeType, NewList)
-    end.
-
-can_reuse_old_node(OldList, NewList) ->
-    {Prefix, RestNewList} = remove_prefix_kvs(hd(OldList), NewList),
-    case old_list_is_prefix(OldList, RestNewList, 0) of
-        {true, Size, Suffix} ->
-            ReuseThreshold = get_chunk_size() * ?FILL_RATIO,
-            if Size < ReuseThreshold -> false; true ->
-                {true, Prefix, Suffix}
-            end;
-        false ->
-            false
-    end.
-
-remove_prefix_kvs(KV1, [KV2 | Rest]) when KV2 < KV1 ->
-    {Prefix, RestNewList} = remove_prefix_kvs(KV1, Rest),
-    {[KV2 | Prefix], RestNewList};
-remove_prefix_kvs(_, RestNewList) ->
-    {[], RestNewList}.
-
-% No more KV's in the old node so it's a prefix
-old_list_is_prefix([], Suffix, Size) ->
-    {true, Size, Suffix};
-% Some KV's have been removed from the old node
-old_list_is_prefix(_OldList, [], _Size) ->
-    false;
-% KV is equal in both old and new node so continue
-old_list_is_prefix([KV | Rest1], [KV | Rest2], Acc) ->
-    old_list_is_prefix(Rest1, Rest2, ?term_size(KV) + Acc);
-% KV mismatch between old and new node so not a prefix
-old_list_is_prefix(_OldList, _NewList, _Acc) ->
-    false.
-
-modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
-    modify_node(Bt, nil, Actions, QueryOutput);
-modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            tuple_size(NodeTuple), [])), QueryOutput};
-modify_kpnode(Bt, NodeTuple, LowerBound,
-        [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
-    Sz = tuple_size(NodeTuple),
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
-    case N =:= Sz of
-    true  ->
-        % perform remaining actions on last node
-        {_, PointerInfo} = element(Sz, NodeTuple),
-        {ok, ChildKPs, QueryOutput2} =
-            modify_node(Bt, PointerInfo, Actions, QueryOutput),
-        NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            Sz - 1, ChildKPs)),
-        {ok, NodeList, QueryOutput2};
-    false ->
-        {NodeKey, PointerInfo} = element(N, NodeTuple),
-        SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
-                not less(Bt, NodeKey, ActionKey)
-            end,
-        {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
-        {ok, ChildKPs, QueryOutput2} =
-                modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
-        ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
-                LowerBound, N - 1, ResultNode)),
-        modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
-    end.
-
-bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
-    Tail;
-bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-
-bounded_tuple_to_list(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
-bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
-    lists:reverse(Acc, Tail);
-bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
-
-find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
-    End;
-find_first_gteq(Bt, Tuple, Start, End, Key) ->
-    Mid = Start + ((End - Start) div 2),
-    {TupleKey, _} = element(Mid, Tuple),
-    case less(Bt, TupleKey, Key) of
-    true ->
-        find_first_gteq(Bt, Tuple, Mid+1, End, Key);
-    false ->
-        find_first_gteq(Bt, Tuple, Start, Mid, Key)
-    end.
-
-modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
-    case ActionType of
-    insert ->
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-    remove ->
-        % just drop the action
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
-    fetch ->
-        % the key/value must not exist in the tree
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-    end;
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
-    {Key, Value} = element(N, NodeTuple),
-    ResultNode =  bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
-    case less(Bt, ActionKey, Key) of
-    true ->
-        case ActionType of
-        insert ->
-            % ActionKey is less than the Key, so insert
-            modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-        remove ->
-            % ActionKey is less than the Key, just drop the action
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
-        fetch ->
-            % ActionKey is less than the Key, the key/value must not exist in the tree
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-        end;
-    false ->
-        % ActionKey and Key are maybe equal.
-        case less(Bt, Key, ActionKey) of
-        false ->
-            case ActionType of
-            insert ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-            remove ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
-            fetch ->
-                % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
-                % since an identical action key can follow it.
-                modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
-            end;
-        true ->
-            modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
-        end
-    end.
-
-
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    P = element(1, Node),
-    case get_node(Bt, P) of
-    {kp_node, NodeList} ->
-        NodeList2 = adjust_dir(Dir, NodeList),
-        reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
-    {kv_node, KVs} ->
-        KVs2 = adjust_dir(Dir, KVs),
-        reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
-    end.
-
-reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-
-    GTEKeyStartKVs =
-    case KeyStart of
-    undefined ->
-        KVs;
-    _ ->
-        DropFun = case Dir of
-        fwd ->
-            fun({Key, _}) -> less(Bt, Key, KeyStart) end;
-        rev ->
-            fun({Key, _}) -> less(Bt, KeyStart, Key) end
-        end,
-        lists:dropwhile(DropFun, KVs)
-    end,
-    KVs2 = lists:takewhile(
-        fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
-    reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-        _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    case GroupedKey of
-    undefined ->
-        reduce_stream_kv_node2(Bt, RestKVs, Key,
-                [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
-    _ ->
-
-        case KeyGroupFun(GroupedKey, Key) of
-        true ->
-            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
-                [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
-                Fun, Acc);
-        false ->
-            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
-            {ok, Acc2} ->
-                reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
-                    [], KeyGroupFun, Fun, Acc2);
-            {stop, Acc2} ->
-                throw({stop, Acc2})
-            end
-        end
-    end.
-
-reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-    Nodes =
-    case KeyStart of
-    undefined ->
-        NodeList;
-    _ ->
-        case Dir of
-        fwd ->
-            lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
-        rev ->
-            RevKPs = lists:reverse(NodeList),
-            case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
-            {_Before, []} ->
-                NodeList;
-            {Before, [FirstAfter | _]} ->
-                [FirstAfter | lists:reverse(Before)]
-            end
-        end
-    end,
-    {InRange, MaybeInRange} = lists:splitwith(
-        fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
-    NodesInRange = case MaybeInRange of
-    [FirstMaybeInRange | _] when Dir =:= fwd ->
-        InRange ++ [FirstMaybeInRange];
-    _ ->
-        InRange
-    end,
-    reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
-                        undefined, [], [], KeyGroupFun, Fun, Acc) ->
-    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
-                [], [], KeyGroupFun, Fun, Acc),
-    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
-            GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
-        KeyGroupFun(GroupedKey, Key) end, NodeList),
-    {GroupedNodes, UngroupedNodes} =
-    case Grouped0 of
-    [] ->
-        {Grouped0, Ungrouped0};
-    _ ->
-        [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
-        {RestGrouped, [FirstGrouped | Ungrouped0]}
-    end,
-    GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
-    case UngroupedNodes of
-    [{_Key, NodeInfo}|RestNodes] ->
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
-        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
-                GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-    [] ->
-        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
-    end.
-
-adjust_dir(fwd, List) ->
-    List;
-adjust_dir(rev, List) ->
-    lists:reverse(List).
-
-stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
-    end.
-
-stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
-    end.
-
-stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
-    Red = element(2, Node),
-    case Fun(traverse, Key, Red, Acc) of
-    {ok, Acc2} ->
-        case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
-        {ok, Acc3} ->
-            stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
-        {stop, LastReds, Acc3} ->
-            {stop, LastReds, Acc3}
-        end;
-    {skip, Acc2} ->
-        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
-    {stop, Acc2} ->
-        {stop, Reds, Acc2}
-    end.
-
-drop_nodes(_Bt, Reds, _StartKey, []) ->
-    {Reds, []};
-drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
-    case less(Bt, NodeKey, StartKey) of
-    true ->
-        drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
-    false ->
-        {Reds, [{NodeKey, Node} | RestKPs]}
-    end.
-
-stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
-    {NewReds, NodesToStream} =
-    case Dir of
-    fwd ->
-        % drop all nodes sorting before the key
-        drop_nodes(Bt, Reds, StartKey, KPs);
-    rev ->
-        % keep all nodes sorting before the key, AND the first node to sort after
-        RevKPs = lists:reverse(KPs),
-         case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
-        {_RevsBefore, []} ->
-            % everything sorts before it
-            {Reds, KPs};
-        {RevBefore, [FirstAfter | Drop]} ->
-            {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
-                 [FirstAfter | lists:reverse(RevBefore)]}
-        end
-    end,
-    case NodesToStream of
-    [] ->
-        {ok, Acc};
-    [{_Key, Node} | Rest] ->
-        case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
-        {ok, Acc2} ->
-            Red = element(2, Node),
-            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
-        {stop, LastReds, Acc2} ->
-            {stop, LastReds, Acc2}
-        end
-    end.
-
-stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
-    DropFun =
-    case Dir of
-    fwd ->
-        fun({Key, _}) -> less(Bt, Key, StartKey) end;
-    rev ->
-        fun({Key, _}) -> less(Bt, StartKey, Key) end
-    end,
-    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
-    AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
-    stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
-
-stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
-    case InRange(K) of
-    false ->
-        {stop, {PrevKVs, Reds}, Acc};
-    true ->
-        AssembledKV = assemble(Bt, K, V),
-        case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
-        {ok, Acc2} ->
-            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
-        {stop, Acc2} ->
-            {stop, {PrevKVs, Reds}, Acc2}
-        end
-    end.
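Taken together, the exports removed above form the 3.x append-only B-tree API. A hedged usage sketch (couch_file and the 3.x couch_btree module are assumed to be available, as they still are on the 3.x branch):

    btree_demo(Path) ->
        {ok, Fd} = couch_file:open(Path, [create, overwrite]),
        % A 'nil' state opens a brand-new, empty tree on this file.
        {ok, Bt0} = couch_btree:open(nil, Fd, [{reduce, fun count_reduce/2}]),
        {ok, Bt1} = couch_btree:add_remove(Bt0, [{K, K * K} || K <- lists:seq(1, 100)], []),
        % Found keys come back wrapped in {ok, ...}; missing keys as not_found.
        _Results = couch_btree:lookup(Bt1, [1, 2, 1000]),
        % 100 with the count reduce below, computed from the root reduction.
        {ok, Total} = couch_btree:full_reduce(Bt1),
        couch_file:close(Fd),
        Total.

    count_reduce(reduce, KVs) -> length(KVs);
    count_reduce(rereduce, Counts) -> lists:sum(Counts).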
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
deleted file mode 100644
index 6e9294a..0000000
--- a/src/couch/src/couch_changes.erl
+++ /dev/null
@@ -1,724 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes).
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([
-    handle_db_changes/3,
-    get_changes_timeout/2,
-    wait_updated/3,
-    get_rest_updated/1,
-    configure_filter/4,
-    filter/3,
-    handle_db_event/3,
-    handle_view_event/3,
-    send_changes_doc_ids/6,
-    send_changes_design_docs/6
-]).
-
--export([changes_enumerator/2]).
-
-%% export so we can use fully qualified call to facilitate hot-code upgrade
--export([
-    keep_sending_changes/3
-]).
-
--record(changes_acc, {
-    db,
-    seq,
-    prepend,
-    filter,
-    callback,
-    user_acc,
-    resp_type,
-    limit,
-    include_docs,
-    doc_options,
-    conflicts,
-    timeout,
-    timeout_fun,
-    aggregation_kvs,
-    aggregation_results
-}).
-
-handle_db_changes(Args0, Req, Db0) ->
-    #changes_args{
-        style = Style,
-        filter = FilterName,
-        feed = Feed,
-        dir = Dir,
-        since = Since
-    } = Args0,
-    Filter = configure_filter(FilterName, Style, Req, Db0),
-    Args = Args0#changes_args{filter_fun = Filter},
-    DbName = couch_db:name(Db0),
-    StartListenerFun = fun() ->
-        couch_event:link_listener(
-            ?MODULE, handle_db_event, self(), [{dbname, DbName}]
-        )
-    end,
-    Start = fun() ->
-        {ok, Db} = couch_db:reopen(Db0),
-        StartSeq = case Dir of
-        rev ->
-            couch_db:get_update_seq(Db);
-        fwd ->
-            Since
-        end,
-        {Db, StartSeq}
-    end,
-    % begin timer to deal with heartbeat when filter function fails
-    case Args#changes_args.heartbeat of
-    undefined ->
-        erlang:erase(last_changes_heartbeat);
-    Val when is_integer(Val); Val =:= true ->
-        put(last_changes_heartbeat, os:timestamp())
-    end,
-
-    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
-    true ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            {ok, Listener} = StartListenerFun(),
-
-            {Db, StartSeq} = Start(),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
-                             <<"">>, Timeout, TimeoutFun),
-            try
-                keep_sending_changes(
-                    Args#changes_args{dir=fwd},
-                    Acc0,
-                    true)
-            after
-                couch_event:stop_listener(Listener),
-                get_rest_updated(ok) % clean out any remaining update messages
-            end
-        end;
-    false ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            {Db, StartSeq} = Start(),
-            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
-                             UserAcc2, Db, StartSeq, <<>>,
-                             Timeout, TimeoutFun),
-            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
-                send_changes(
-                    Acc0,
-                    Dir,
-                    true),
-            end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
-        end
-    end.
-
-
-handle_db_event(_DbName, updated, Parent) ->
-    Parent ! updated,
-    {ok, Parent};
-handle_db_event(_DbName, deleted, Parent) ->
-    Parent ! deleted,
-    {ok, Parent};
-handle_db_event(_DbName, _Event, Parent) ->
-    {ok, Parent}.
-
-
-handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
-    case Msg of
-        {index_commit, DDocId} ->
-            Parent ! updated;
-        {index_delete, DDocId} ->
-            Parent ! deleted;
-        _ ->
-            ok
-    end,
-    {ok, {Parent, DDocId}}.
-
-get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
-    Pair;
-get_callback_acc(Callback) when is_function(Callback, 2) ->
-    {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
-
-configure_filter("_doc_ids", Style, Req, _Db) ->
-    {doc_ids, Style, get_doc_ids(Req)};
-configure_filter("_selector", Style, Req, _Db) ->
-    {selector, Style,  get_selector_and_fields(Req)};
-configure_filter("_design", Style, _Req, _Db) ->
-    {design_docs, Style};
-configure_filter("_view", Style, Req, Db) ->
-    ViewName = get_view_qs(Req),
-    if ViewName /= "" -> ok; true ->
-        throw({bad_request, "`view` filter parameter is not provided."})
-    end,
-    ViewNameParts = string:tokens(ViewName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
-        [DName, VName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"views">>, VName]),
-            case couch_db:is_clustered(Db) of
-                true ->
-                    DIR = fabric_util:doc_id_and_rev(DDoc),
-                    {fetch, view, Style, DIR, VName};
-                false ->
-                    {view, Style, DDoc, VName}
-            end;
-        [] ->
-            Msg = "`view` must be of the form `designname/viewname`",
-            throw({bad_request, Msg})
-    end;
-configure_filter([$_ | _], _Style, _Req, _Db) ->
-    throw({bad_request, "unknown builtin filter name"});
-configure_filter("", main_only, _Req, _Db) ->
-    {default, main_only};
-configure_filter("", all_docs, _Req, _Db) ->
-    {default, all_docs};
-configure_filter(FilterName, Style, Req, Db) ->
-    FilterNameParts = string:tokens(FilterName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
-        [DName, FName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"filters">>, FName]),
-            case couch_db:is_clustered(Db) of
-                true ->
-                    DIR = fabric_util:doc_id_and_rev(DDoc),
-                    {fetch, custom, Style, Req, DIR, FName};
-                false->
-                    {custom, Style, Req, DDoc, FName}
-            end;
-
-        [] ->
-            {default, Style};
-        _Else ->
-            Msg = "`filter` must be of the form `designname/filtername`",
-            throw({bad_request, Msg})
-    end.
-
-
-filter(Db, #full_doc_info{}=FDI, Filter) ->
-    filter(Db, couch_doc:to_doc_info(FDI), Filter);
-filter(_Db, DocInfo, {default, Style}) ->
-    apply_style(DocInfo, Style);
-filter(_Db, DocInfo, {doc_ids, Style, DocIds}) ->
-    case lists:member(DocInfo#doc_info.id, DocIds) of
-        true ->
-            apply_style(DocInfo, Style);
-        false ->
-            []
-    end;
-filter(Db, DocInfo, {selector, Style, {Selector, _Fields}}) ->
-    Docs = open_revs(Db, DocInfo, Style),
-    Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
-        || Doc <- Docs],
-    filter_revs(Passes, Docs);
-filter(_Db, DocInfo, {design_docs, Style}) ->
-    case DocInfo#doc_info.id of
-        <<"_design", _/binary>> ->
-            apply_style(DocInfo, Style);
-        _ ->
-            []
-    end;
-filter(Db, DocInfo, {view, Style, DDoc, VName}) ->
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
-    filter_revs(Passes, Docs);
-filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) ->
-    Req = case Req0 of
-        {json_req, _} -> Req0;
-        #httpd{} -> {json_req, couch_httpd_external:json_req_obj(Req0, Db)}
-    end,
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
-    filter_revs(Passes, Docs).
-
-
-get_view_qs({json_req, {Props}}) ->
-    {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
-    binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
-get_view_qs(Req) ->
-    couch_httpd:qs_value(Req, "view", "").
-
-get_doc_ids({json_req, {Props}}) ->
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {Props} = couch_httpd:json_body_obj(Req),
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='GET'}=Req) ->
-    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
-    check_docids(DocIds);
-get_doc_ids(_) ->
-    throw({bad_request, no_doc_ids_provided}).
-
-
-get_selector_and_fields({json_req, {Props}}) ->
-    Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
-    Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
-    {Selector, Fields};
-get_selector_and_fields(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    get_selector_and_fields({json_req,  couch_httpd:json_body_obj(Req)});
-get_selector_and_fields(_) ->
-    throw({bad_request, "Selector must be specified in POST payload"}).
-
-
-check_docids(DocIds) when is_list(DocIds) ->
-    lists:foreach(fun
-        (DocId) when not is_binary(DocId) ->
-            Msg = "`doc_ids` filter parameter is not a list of doc ids.",
-            throw({bad_request, Msg});
-        (_) -> ok
-    end, DocIds),
-    DocIds;
-check_docids(_) ->
-    Msg = "`doc_ids` filter parameter is not a list of doc ids.",
-    throw({bad_request, Msg}).
-
-
-check_selector(Selector={_}) ->
-    try
-        mango_selector:normalize(Selector)
-    catch
-        {mango_error, Mod, Reason0} ->
-            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
-            throw({bad_request, Reason})
-    end;
-check_selector(_Selector) ->
-    throw({bad_request, "Selector error: expected a JSON object"}).
-
-
-check_fields(nil) ->
-    nil;
-check_fields(Fields) when is_list(Fields) ->
-    try
-        {ok, Fields1} = mango_fields:new(Fields),
-        Fields1
-    catch
-        {mango_error, Mod, Reason0} ->
-            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
-            throw({bad_request, Reason})
-    end;
-check_fields(_Fields) ->
-    throw({bad_request, "Selector error: fields must be JSON array"}).
-
-
-open_ddoc(Db, DDocId) ->
-    DbName = couch_db:name(Db),
-    case couch_db:is_clustered(Db) of
-        true ->
-            case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
-                {ok, _} = Resp -> Resp;
-                Else -> throw(Else)
-            end;
-        false ->
-            case couch_db:open_doc(Db, DDocId, [ejson_body]) of
-                {ok, _} = Resp -> Resp;
-                Else -> throw(Else)
-            end
-    end.
-
-
-check_member_exists(#doc{body={Props}}, Path) ->
-    couch_util:get_nested_json_value({Props}, Path).
-
-
-apply_style(#doc_info{revs=Revs}, main_only) ->
-    [#rev_info{rev=Rev} | _] = Revs,
-    [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-apply_style(#doc_info{revs=Revs}, all_docs) ->
-    [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev=R} <- Revs].
-
-
-open_revs(Db, DocInfo, Style) ->
-    DocInfos = case Style of
-        main_only -> [DocInfo];
-        all_docs -> [DocInfo#doc_info{revs=[R]}|| R <- DocInfo#doc_info.revs]
-    end,
-    OpenOpts = [deleted, conflicts],
-    % Relying on list comprehensions to silence errors
-    OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos],
-    [Doc || {ok, Doc} <- OpenResults].
-
-
-filter_revs(Passes, Docs) ->
-    lists:flatmap(fun
-        ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
-            RevStr = couch_doc:rev_to_str({RevPos, RevId}),
-            Change = {[{<<"rev">>, RevStr}]},
-            [Change];
-        (_) ->
-            []
-    end, lists:zip(Passes, Docs)).
-
-
-get_changes_timeout(Args, Callback) ->
-    #changes_args{
-        heartbeat = Heartbeat,
-        timeout = Timeout,
-        feed = ResponseType
-    } = Args,
-    DefaultTimeout = list_to_integer(
-        config:get("httpd", "changes_timeout", "60000")
-    ),
-    case Heartbeat of
-    undefined ->
-        case Timeout of
-        undefined ->
-            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
-        infinity ->
-            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
-        _ ->
-            {lists:min([DefaultTimeout, Timeout]),
-                fun(UserAcc) -> {stop, UserAcc} end}
-        end;
-    true ->
-        {DefaultTimeout,
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
-    _ ->
-        {lists:min([DefaultTimeout, Heartbeat]),
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
-    end.
-
-start_sending_changes(_Callback, UserAcc, ResponseType)
-        when ResponseType =:= "continuous"
-        orelse ResponseType =:= "eventsource" ->
-    UserAcc;
-start_sending_changes(Callback, UserAcc, ResponseType) ->
-    Callback(start, ResponseType, UserAcc).
-
-build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
-    #changes_args{
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        limit = Limit,
-        feed = ResponseType,
-        filter_fun = Filter
-    } = Args,
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        prepend = Prepend,
-        filter = Filter,
-        callback = Callback,
-        user_acc = UserAcc,
-        resp_type = ResponseType,
-        limit = Limit,
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        timeout = Timeout,
-        timeout_fun = TimeoutFun,
-        aggregation_results=[],
-        aggregation_kvs=[]
-    }.
-
-send_changes(Acc, Dir, FirstRound) ->
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        filter = Filter
-    } = maybe_upgrade_changes_acc(Acc),
-    DbEnumFun = fun changes_enumerator/2,
-    case can_optimize(FirstRound, Filter) of
-        {true, Fun} ->
-            Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
-        _ ->
-            Opts = [{dir, Dir}],
-            couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
-    end.
-
-
-can_optimize(true, {doc_ids, _Style, DocIds}) ->
-    MaxDocIds = config:get_integer("couchdb",
-        "changes_doc_ids_optimization_threshold", 100),
-    if length(DocIds) =< MaxDocIds ->
-        {true, fun send_changes_doc_ids/6};
-    true ->
-        false
-    end;
-can_optimize(true, {design_docs, _Style}) ->
-    {true, fun send_changes_design_docs/6};
-can_optimize(_, _) ->
-    false.
-
-
-send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
-    Results = couch_db:get_full_doc_infos(Db, DocIds),
-    FullInfos = lists:foldl(fun
-        (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
-        (not_found, Acc) -> Acc
-    end, [], Results),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
-    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
-    Opts = [
-        include_deleted,
-        {start_key, <<"_design/">>},
-        {end_key_gt, <<"_design0">>}
-    ],
-    {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
-    FoldFun = case Dir of
-        fwd -> fun lists:foldl/3;
-        rev -> fun lists:foldr/3
-    end,
-    GreaterFun = case Dir of
-        fwd -> fun(A, B) -> A > B end;
-        rev -> fun(A, B) -> A =< B end
-    end,
-    DocInfos = lists:foldl(fun(FDI, Acc) ->
-        DI = couch_doc:to_doc_info(FDI),
-        case GreaterFun(DI#doc_info.high_seq, StartSeq) of
-            true -> [DI | Acc];
-            false -> Acc
-        end
-    end, [], FullDocInfos),
-    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
-    FinalAcc = try
-        FoldFun(fun(DocInfo, Acc) ->
-            case Fun(DocInfo, Acc) of
-                {ok, NewAcc} ->
-                    NewAcc;
-                {stop, NewAcc} ->
-                    throw({stop, NewAcc})
-            end
-        end, Acc0, SortedDocInfos)
-    catch
-        {stop, Acc} -> Acc
-    end,
-    case Dir of
-        fwd ->
-            FinalAcc0 = case element(1, FinalAcc) of
-                changes_acc -> % we came here via couch_http or internal call
-                    FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)};
-                fabric_changes_acc -> % we came here via chttpd / fabric / rexi
-                    FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
-            end,
-            {ok, FinalAcc0};
-        rev -> {ok, FinalAcc}
-    end.
-
-
-keep_sending_changes(Args, Acc0, FirstRound) ->
-    #changes_args{
-        feed = ResponseType,
-        limit = Limit,
-        db_open_options = DbOptions
-    } = Args,
-
-    {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
-
-    #changes_acc{
-        db = Db, callback = Callback,
-        timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
-        prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
-    } = maybe_upgrade_changes_acc(ChangesAcc),
-
-    couch_db:close(Db),
-    if Limit > NewLimit, ResponseType == "longpoll" ->
-        end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
-    true ->
-        case wait_updated(Timeout, TimeoutFun, UserAcc2) of
-        {updated, UserAcc4} ->
-            DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
-            case couch_db:open(couch_db:name(Db), DbOptions1) of
-            {ok, Db2} ->
-                ?MODULE:keep_sending_changes(
-                  Args#changes_args{limit=NewLimit},
-                  ChangesAcc#changes_acc{
-                    db = Db2,
-                    user_acc = UserAcc4,
-                    seq = EndSeq,
-                    prepend = Prepend2,
-                    timeout = Timeout,
-                    timeout_fun = TimeoutFun},
-                  false);
-            _Else ->
-                end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
-            end;
-        {stop, UserAcc4} ->
-            end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
-        end
-    end.
-
-end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
-    Callback({stop, EndSeq}, ResponseType, UserAcc).
-
-changes_enumerator(Value, Acc) ->
-    #changes_acc{
-        filter = Filter, callback = Callback, prepend = Prepend,
-        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
-        timeout = Timeout, timeout_fun = TimeoutFun
-    } = maybe_upgrade_changes_acc(Acc),
-    Results0 = filter(Db, Value, Filter),
-    Results = [Result || Result <- Results0, Result /= null],
-    Seq = case Value of
-        #full_doc_info{} ->
-            Value#full_doc_info.update_seq;
-        #doc_info{} ->
-            Value#doc_info.high_seq
-    end,
-    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
-    case Results of
-    [] ->
-        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
-        case Done of
-        stop ->
-            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
-        ok ->
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
-        end;
-    _ ->
-        if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
-            ChangesRow = changes_row(Results, Value, Acc),
-            UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
-            reset_heartbeat(),
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}};
-        true ->
-            ChangesRow = changes_row(Results, Value, Acc),
-            UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
-            reset_heartbeat(),
-            {Go, Acc#changes_acc{
-                seq = Seq, prepend = <<",\n">>,
-                user_acc = UserAcc2, limit = Limit - 1}}
-        end
-    end.
-
-
-
-changes_row(Results, #full_doc_info{} = FDI, Acc) ->
-    changes_row(Results, couch_doc:to_doc_info(FDI), Acc);
-changes_row(Results, DocInfo, Acc0) ->
-    Acc = maybe_upgrade_changes_acc(Acc0),
-    #doc_info{
-        id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
-    } = DocInfo,
-    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
-        deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)}.
-
-maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
-    #changes_acc{
-        db = Db,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        filter = Filter
-    } = Acc,
-    Opts = case Conflicts of
-               true -> [deleted, conflicts];
-               false -> [deleted]
-           end,
-    load_doc(Db, Value, Opts, DocOpts, Filter);
-
-maybe_get_changes_doc(_Value, _Acc) ->
-    [].
-
-
-load_doc(Db, Value, Opts, DocOpts, Filter) ->
-    case couch_index_util:load_doc(Db, Value, Opts) of
-        null ->
-            [{doc, null}];
-        Doc ->
-            [{doc, doc_to_json(Doc, DocOpts, Filter)}]
-    end.
-
-
-doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
-    when Fields =/= nil ->
-    mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
-doc_to_json(Doc, DocOpts, _Filter) ->
-    couch_doc:to_json_obj(Doc, DocOpts).
-
-
-deleted_item(true) -> [{<<"deleted">>, true}];
-deleted_item(_) -> [].
-
-% waits for an updated msg; if there are multiple msgs, collects them.
-wait_updated(Timeout, TimeoutFun, UserAcc) ->
-    receive
-    updated ->
-        get_rest_updated(UserAcc);
-    deleted ->
-        {stop, UserAcc}
-    after Timeout ->
-        {Go, UserAcc2} = TimeoutFun(UserAcc),
-        case Go of
-        ok ->
-            ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
-        stop ->
-            {stop, UserAcc2}
-        end
-    end.
-
-get_rest_updated(UserAcc) ->
-    receive
-    updated ->
-        get_rest_updated(UserAcc)
-    after 0 ->
-        {updated, UserAcc}
-    end.
-
-reset_heartbeat() ->
-    case get(last_changes_heartbeat) of
-    undefined ->
-        ok;
-    _ ->
-        put(last_changes_heartbeat, os:timestamp())
-    end.
-
-maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
-    Before = get(last_changes_heartbeat),
-    case Before of
-    undefined ->
-        {ok, Acc};
-    _ ->
-        Now = os:timestamp(),
-        case timer:now_diff(Now, Before) div 1000 >= Timeout of
-        true ->
-            Acc2 = TimeoutFun(Acc),
-            put(last_changes_heartbeat, Now),
-            Acc2;
-        false ->
-            {ok, Acc}
-        end
-    end.
-
-
-maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
-    Acc;
-maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
-    #changes_acc{
-        db = element(2, Acc),
-        seq = element(6, Acc),
-        prepend = element(7, Acc),
-        filter = element(8, Acc),
-        callback = element(9, Acc),
-        user_acc = element(10, Acc),
-        resp_type = element(11, Acc),
-        limit = element(12, Acc),
-        include_docs = element(13, Acc),
-        doc_options = element(14, Acc),
-        conflicts = element(15, Acc),
-        timeout = element(16, Acc),
-        timeout_fun = element(17, Acc),
-        aggregation_kvs = element(18, Acc),
-        aggregation_results = element(19, Acc)
-    }.
diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl
deleted file mode 100644
index cfcc2a4..0000000
--- a/src/couch/src/couch_compress.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compress).
-
--export([compress/2, decompress/1, is_compressed/2]).
--export([get_compression_method/0]).
--export([uncompressed_size/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% binaries compressed with snappy have their first byte set to this value
--define(SNAPPY_PREFIX, 1).
-% Term prefixes documented at:
-%      http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
--define(TERM_PREFIX, 131).
--define(COMPRESSED_TERM_PREFIX, 131, 80).
-
-
-get_compression_method() ->
-    case config:get("couchdb", "file_compression") of
-    undefined ->
-        ?DEFAULT_COMPRESSION;
-    Method1 ->
-        case string:tokens(Method1, "_") of
-        [Method] ->
-            list_to_existing_atom(Method);
-        [Method, Level] ->
-            {list_to_existing_atom(Method), list_to_integer(Level)}
-        end
-    end.
-
-
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
-    Bin;
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(<<?COMPRESSED_TERM_PREFIX, _/binary>> = Bin, {deflate, _Level}) ->
-    Bin;
-compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(Term, none) ->
-    ?term_to_bin(Term);
-compress(Term, {deflate, Level}) ->
-    term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
-compress(Term, snappy) ->
-    Bin = ?term_to_bin(Term),
-    try
-        {ok, CompressedBin} = snappy:compress(Bin),
-        <<?SNAPPY_PREFIX, CompressedBin/binary>>
-    catch exit:snappy_nif_not_loaded ->
-        Bin
-    end.
-
-
-decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, TermBin} = snappy:decompress(Rest),
-    binary_to_term(TermBin);
-decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    binary_to_term(Bin);
-decompress(_) ->
-    error(invalid_compression).
-
-
-is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
-    Method =:= snappy;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
-    true;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
-    false;
-is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
-    Method =:= none;
-is_compressed(Term, _Method) when not is_binary(Term) ->
-    false;
-is_compressed(_, _) ->
-    error(invalid_compression).
-
-
-uncompressed_size(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, Size} = snappy:uncompressed_length(Rest),
-    Size;
-uncompressed_size(<<?COMPRESSED_TERM_PREFIX, Size:32, _/binary>> = _Bin) ->
-    % See http://erlang.org/doc/apps/erts/erl_ext_dist.html
-    % The uncompressed binary would be encoded with <<131, Rest/binary>>
-    % so need to add 1 for 131
-    Size + 1;
-uncompressed_size(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    byte_size(Bin);
-uncompressed_size(_) ->
-    error(invalid_compression).
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
deleted file mode 100644
index 6587205..0000000
--- a/src/couch/src/couch_db.erl
+++ /dev/null
@@ -1,2086 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db).
-
--export([
-    create/2,
-    open/2,
-    open_int/2,
-    incref/1,
-    reopen/1,
-    close/1,
-
-    clustered_db/2,
-    clustered_db/3,
-
-    monitor/1,
-    monitored_by/1,
-    is_idle/1,
-
-    is_admin/1,
-    check_is_admin/1,
-    check_is_member/1,
-
-    name/1,
-    get_after_doc_read_fun/1,
-    get_before_doc_update_fun/1,
-    get_committed_update_seq/1,
-    get_compacted_seq/1,
-    get_compactor_pid/1,
-    get_compactor_pid_sync/1,
-    get_db_info/1,
-    get_partition_info/2,
-    get_del_doc_count/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_filepath/1,
-    get_instance_start_time/1,
-    get_pid/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_update_seq/1,
-    get_user_ctx/1,
-    get_uuid/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-
-    is_db/1,
-    is_system_db/1,
-    is_clustered/1,
-    is_system_db_name/1,
-    is_partitioned/1,
-
-    set_revs_limit/2,
-    set_purge_infos_limit/2,
-    set_security/2,
-    set_user_ctx/2,
-
-    load_validation_funs/1,
-    reload_validation_funs/1,
-
-    open_doc/2,
-    open_doc/3,
-    open_doc_revs/4,
-    open_doc_int/3,
-    get_doc_info/2,
-    get_full_doc_info/2,
-    get_full_doc_infos/2,
-    get_missing_revs/2,
-    get_design_doc/2,
-    get_design_docs/1,
-    get_design_doc_count/1,
-    get_purge_infos/2,
-
-    get_minimum_purge_seq/1,
-    purge_client_exists/3,
-
-    validate_docid/2,
-    doc_from_json_obj_validate/2,
-
-    update_doc/3,
-    update_doc/4,
-    update_docs/4,
-    update_docs/2,
-    update_docs/3,
-    delete_doc/3,
-
-    purge_docs/2,
-    purge_docs/3,
-
-    with_stream/3,
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/3,
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_design_docs/4,
-    fold_changes/4,
-    fold_changes/5,
-    count_changes_since/2,
-    fold_purge_infos/4,
-    fold_purge_infos/5,
-
-    calculate_start_seq/3,
-    owner_of/2,
-
-    start_compact/1,
-    cancel_compact/1,
-    wait_for_compaction/1,
-    wait_for_compaction/2,
-
-    dbname_suffix/1,
-    normalize_dbname/1,
-    validate_dbname/1,
-
-    make_doc/5,
-    new_revid/1
-]).
-
-
--export([
-    start_link/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(DBNAME_REGEX,
-    "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
-    "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
-).
-
-start_link(Engine, DbName, Filepath, Options) ->
-    Arg = {Engine, DbName, Filepath, Options},
-    proc_lib:start_link(couch_db_updater, init, [Arg]).
-
-create(DbName, Options) ->
-    couch_server:create(DbName, Options).
-
-% This is for opening a database for internal purposes like the replicator
-% or the view indexer. It never throws a reader error.
-open_int(DbName, Options) ->
-    couch_server:open(DbName, Options).
-
-% This should be called any time an HTTP request opens the database.
-% It ensures that the HTTP userCtx is a valid reader.
-open(DbName, Options) ->
-    case couch_server:open(DbName, Options) of
-        {ok, Db} ->
-            try
-                check_is_member(Db),
-                {ok, Db}
-            catch
-                throw:Error ->
-                    close(Db),
-                    throw(Error)
-            end;
-        Else -> Else
-    end.
-
-
-reopen(#db{} = Db) ->
-    % We could have just swapped out the storage engine
-    % for this database during a compaction, so we
-    % reimplement this as a close/open pair now.
-    try
-        open(Db#db.name, [{user_ctx, Db#db.user_ctx} | Db#db.options])
-    after
-        close(Db)
-    end.
-
-
-% You shouldn't call this. It's part of the ref counting between
-% couch_server and couch_db instances.
-incref(#db{} = Db) ->
-    couch_db_engine:incref(Db).
-
-clustered_db(DbName, Options) when is_list(Options) ->
-    UserCtx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-    SecProps = couch_util:get_value(security, Options, []),
-    Props = couch_util:get_value(props, Options, []),
-    {ok, #db{
-        name = DbName,
-        user_ctx = UserCtx,
-        security = SecProps,
-        options = [{props, Props}]
-    }};
-
-clustered_db(DbName, #user_ctx{} = UserCtx) ->
-    clustered_db(DbName, [{user_ctx, UserCtx}]).
-
-clustered_db(DbName, UserCtx, SecProps) ->
-    clustered_db(DbName, [{user_ctx, UserCtx}, {security, SecProps}]).
-
-is_db(#db{}) ->
-    true;
-is_db(_) ->
-    false.
-
-is_system_db(#db{options = Options}) ->
-    lists:member(sys_db, Options).
-
-is_clustered(#{}) ->
-    true;
-is_clustered(#db{main_pid = nil}) ->
-    true;
-is_clustered(#db{}) ->
-    false;
-is_clustered(?OLD_DB_REC = Db) ->
-    ?OLD_DB_MAIN_PID(Db) == undefined.
-
-is_partitioned(#db{options = Options}) ->
-    Props = couch_util:get_value(props, Options, []),
-    couch_util:get_value(partitioned, Props, false).
-
-close(#db{} = Db) ->
-    ok = couch_db_engine:decref(Db);
-close(?OLD_DB_REC) ->
-    ok.
-
-is_idle(#db{compactor_pid=nil} = Db) ->
-    monitored_by(Db) == [];
-is_idle(_Db) ->
-    false.
-
-monitored_by(Db) ->
-    case couch_db_engine:monitored_by(Db) of
-        Pids when is_list(Pids) ->
-            PidTracker = whereis(couch_stats_process_tracker),
-            Pids -- [Db#db.main_pid, PidTracker];
-        undefined ->
-            []
-    end.
-
-
-monitor(#db{main_pid=MainPid}) ->
-    erlang:monitor(process, MainPid).
-
-start_compact(#db{} = Db) ->
-    gen_server:call(Db#db.main_pid, start_compact).
-
-cancel_compact(#db{main_pid=Pid}) ->
-    gen_server:call(Pid, cancel_compact).
-
-wait_for_compaction(Db) ->
-    wait_for_compaction(Db, infinity).
-
-wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) ->
-    Start = os:timestamp(),
-    case gen_server:call(Pid, compactor_pid) of
-        CPid when is_pid(CPid) ->
-            Ref = erlang:monitor(process, CPid),
-            receive
-                {'DOWN', Ref, _, _, normal} when Timeout == infinity ->
-                    wait_for_compaction(Db, Timeout);
-                {'DOWN', Ref, _, _, normal} ->
-                    Elapsed = timer:now_diff(os:timestamp(), Start) div 1000,
-                    wait_for_compaction(Db, Timeout - Elapsed);
-                {'DOWN', Ref, _, _, Reason} ->
-                    {error, Reason}
-            after Timeout ->
-                erlang:demonitor(Ref, [flush]),
-                {error, Timeout}
-            end;
-        _ ->
-            ok
-    end.
-
-delete_doc(Db, Id, Revisions) ->
-    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
-    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
-    {ok, Result}.
-
-open_doc(Db, IdOrDocInfo) ->
-    open_doc(Db, IdOrDocInfo, []).
-
-open_doc(Db, Id, Options) ->
-    increment_stat(Db, [couchdb, database_reads]),
-    case open_doc_int(Db, Id, Options) of
-    {ok, #doc{deleted=true}=Doc} ->
-        case lists:member(deleted, Options) of
-        true ->
-            apply_open_options({ok, Doc},Options);
-        false ->
-            {not_found, deleted}
-        end;
-    Else ->
-        apply_open_options(Else,Options)
-    end.
-
-apply_open_options({ok, Doc},Options) ->
-    apply_open_options2(Doc,Options);
-apply_open_options(Else,_Options) ->
-    Else.
-
-apply_open_options2(Doc,[]) ->
-    {ok, Doc};
-apply_open_options2(#doc{atts=Atts0,revs=Revs}=Doc,
-        [{atts_since, PossibleAncestors}|Rest]) ->
-    RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
-    Atts = lists:map(fun(Att) ->
-        [AttPos, Data] = couch_att:fetch([revpos, data], Att),
-        if  AttPos > RevPos -> couch_att:store(data, Data, Att);
-            true -> couch_att:store(data, stub, Att)
-        end
-    end, Atts0),
-    apply_open_options2(Doc#doc{atts=Atts}, Rest);
-apply_open_options2(Doc, [ejson_body | Rest]) ->
-    apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc,[_|Rest]) ->
-    apply_open_options2(Doc,Rest).
-
-
-find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
-    0;
-find_ancestor_rev_pos(_DocRevs, []) ->
-    0;
-find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
-    case lists:member({RevPos, RevId}, AttsSinceRevs) of
-    true ->
-        RevPos;
-    false ->
-        find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
-    end.
-
-open_doc_revs(Db, Id, Revs, Options) ->
-    increment_stat(Db, [couchdb, database_reads]),
-    [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
-    {ok, [apply_open_options(Result, Options) || Result <- Results]}.
-
-% The result is a list of tuples:
-% {Id, MissingRevs, PossibleAncestors}
-% If no revs are missing for an Id, that Id is omitted from the results.
-get_missing_revs(Db, IdRevsList) ->
-    Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
-    {ok, find_missing(IdRevsList, Results)}.
-
-find_missing([], []) ->
-    [];
-find_missing([{Id, Revs}|RestIdRevs], [FullInfo | RestLookupInfo])
-        when is_record(FullInfo, full_doc_info) ->
-    case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
-    [] ->
-        find_missing(RestIdRevs, RestLookupInfo);
-    MissingRevs ->
-        #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
-        LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
-        % Find the revs that are possible parents of this rev
-        PossibleAncestors =
-        lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
-            % this leaf is a "possible ancestor" of the missing
-            % revs if this LeafPos is less than any of the missing revs
-            case lists:any(fun({MissingPos, _}) ->
-                    LeafPos < MissingPos end, MissingRevs) of
-            true ->
-                [{LeafPos, LeafRevId} | Acc];
-            false ->
-                Acc
-            end
-        end, [], LeafRevs),
-        [{Id, MissingRevs, PossibleAncestors} |
-                find_missing(RestIdRevs, RestLookupInfo)]
-    end;
-find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
-    [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
-
-get_doc_info(Db, Id) ->
-    case get_full_doc_info(Db, Id) of
-    #full_doc_info{} = FDI ->
-        {ok, couch_doc:to_doc_info(FDI)};
-    Else ->
-        Else
-    end.
-
-get_full_doc_info(Db, Id) ->
-    [Result] = get_full_doc_infos(Db, [Id]),
-    Result.
-
-get_full_doc_infos(Db, Ids) ->
-    couch_db_engine:open_docs(Db, Ids).
-
-purge_docs(Db, IdRevs) ->
-    purge_docs(Db, IdRevs, []).
-
--spec purge_docs(#db{}, [{UUId, Id, [Rev]}], [PurgeOption]) ->
-    {ok, [Reply]} when
-    UUId :: binary(),
-    Id :: binary() | list(),
-    Rev :: {non_neg_integer(), binary()},
-    PurgeOption :: interactive_edit | replicated_changes,
-    Reply :: {ok, []} | {ok, [Rev]}.
-purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
-    UUIDsIdsRevs2 = [{UUID, couch_util:to_binary(Id), Revs}
-        || {UUID, Id, Revs}  <- UUIDsIdsRevs],
-    % Check here if any UUIDs already exist when
-    % we're not replicating purge infos
-    IsRepl = lists:member(replicated_changes, Options),
-    if IsRepl -> ok; true ->
-        UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2],
-        lists:foreach(fun(Resp) ->
-            if Resp == not_found -> ok; true ->
-                Fmt = "Duplicate purge info UUID: ~s",
-                Reason = io_lib:format(Fmt, [element(2, Resp)]),
-                throw({badreq, Reason})
-            end
-        end, get_purge_infos(Db, UUIDs))
-    end,
-    increment_stat(Db, [couchdb, database_purges]),
-    gen_server:call(Pid, {purge_docs, UUIDsIdsRevs2, Options}).
-
--spec get_purge_infos(#db{}, [UUId]) -> [PurgeInfo] when
-    UUId :: binary(),
-    PurgeInfo :: {PurgeSeq, UUId, Id, [Rev]} | not_found,
-    PurgeSeq :: non_neg_integer(),
-    Id :: binary(),
-    Rev :: {non_neg_integer(), binary()}.
-get_purge_infos(Db, UUIDs) ->
-    couch_db_engine:load_purge_infos(Db, UUIDs).
-
-
-get_minimum_purge_seq(#db{} = Db) ->
-    PurgeSeq = couch_db_engine:get_purge_seq(Db),
-    OldestPurgeSeq = couch_db_engine:get_oldest_purge_seq(Db),
-    PurgeInfosLimit = couch_db_engine:get_purge_infos_limit(Db),
-
-    FoldFun = fun(#doc{id = DocId, body = {Props}}, SeqAcc) ->
-        case DocId of
-            <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
-                ClientSeq = couch_util:get_value(<<"purge_seq">>, Props),
-                DbName = couch_db:name(Db),
-                % If there's a broken doc we have to keep every
-                % purge info until the doc is fixed or removed.
-                Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'",
-                case ClientSeq of
-                    CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit ->
-                        {ok, SeqAcc};
-                    CS when is_integer(CS) ->
-                        case purge_client_exists(DbName, DocId, Props) of
-                            true ->
-                                {ok, erlang:min(CS, SeqAcc)};
-                            false ->
-                                couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
-                                {ok, SeqAcc}
-                        end;
-                    _ ->
-                        couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
-                        {ok, erlang:min(OldestPurgeSeq, SeqAcc)}
-                end;
-            _ ->
-                {stop, SeqAcc}
-        end
-    end,
-    InitMinSeq = PurgeSeq - PurgeInfosLimit,
-    Opts = [
-        {start_key, list_to_binary(?LOCAL_DOC_PREFIX ++ "purge-")}
-    ],
-    {ok, MinIdxSeq} = couch_db:fold_local_docs(Db, FoldFun, InitMinSeq, Opts),
-    FinalSeq = case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
-        true -> MinIdxSeq;
-        false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
-    end,
-    % Log a warning if we've got a purge sequence exceeding the
-    % configured threshold.
-    if FinalSeq >= (PurgeSeq - PurgeInfosLimit) -> ok; true ->
-        Fmt = "The purge sequence for '~s' exceeds configured threshold",
-        couch_log:warning(Fmt, [couch_db:name(Db)])
-    end,
-    FinalSeq.
-
-
-purge_client_exists(DbName, DocId, Props) ->
-    % Warn about clients that have not updated their purge
-    % checkpoints in the last "index_lag_warn_seconds"
-    LagWindow = config:get_integer(
-            "purge", "index_lag_warn_seconds", 86400), % Default 24 hours
-
-    {Mega, Secs, _} = os:timestamp(),
-    NowSecs = Mega * 1000000 + Secs,
-    LagThreshold = NowSecs - LagWindow,
-
-    try
-        Exists = couch_db_plugin:is_valid_purge_client(DbName, Props),
-        if not Exists -> ok; true ->
-            Updated = couch_util:get_value(<<"updated_on">>, Props),
-            if is_integer(Updated) andalso Updated > LagThreshold -> ok; true ->
-                Diff = NowSecs - Updated,
-                Fmt1 = "Purge checkpoint '~s' not updated in ~p seconds"
-                    " in database ~p",
-                couch_log:error(Fmt1, [DocId, Diff, DbName])
-            end
-        end,
-        Exists
-    catch _:_ ->
-        % If we fail to check for a client we have to assume that
-        % it exists.
-        Fmt2 = "Failed to check purge checkpoint using"
-            " document '~p' in database ~p",
-        couch_log:error(Fmt2, [DocId, DbName]),
-        true
-    end.
-
-
-set_purge_infos_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_purge_infos_limit, Limit}, infinity);
-set_purge_infos_limit(_Db, _Limit) ->
-    throw(invalid_purge_infos_limit).
-
-
-get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
-    Fun.
-
-get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
-    Fun.
-
-get_committed_update_seq(#db{committed_update_seq=Seq}) ->
-    Seq.
-
-get_update_seq(#db{} = Db)->
-    couch_db_engine:get_update_seq(Db).
-
-get_user_ctx(#db{user_ctx = UserCtx}) ->
-    UserCtx;
-get_user_ctx(?OLD_DB_REC = Db) ->
-    ?OLD_DB_USER_CTX(Db).
-
-get_purge_seq(#db{}=Db) ->
-    couch_db_engine:get_purge_seq(Db).
-
-get_oldest_purge_seq(#db{}=Db) ->
-    couch_db_engine:get_oldest_purge_seq(Db).
-
-get_purge_infos_limit(#db{}=Db) ->
-    couch_db_engine:get_purge_infos_limit(Db).
-
-get_pid(#db{main_pid = Pid}) ->
-    Pid.
-
-get_del_doc_count(Db) ->
-    {ok, couch_db_engine:get_del_doc_count(Db)}.
-
-get_doc_count(Db) ->
-    {ok, couch_db_engine:get_doc_count(Db)}.
-
-get_uuid(#db{}=Db) ->
-    couch_db_engine:get_uuid(Db).
-
-get_epochs(#db{}=Db) ->
-    Epochs = couch_db_engine:get_epochs(Db),
-    validate_epochs(Epochs),
-    Epochs.
-
-get_filepath(#db{filepath = FilePath}) ->
-    FilePath.
-
-get_instance_start_time(#db{instance_start_time = IST}) ->
-    IST.
-
-get_compacted_seq(#db{}=Db) ->
-    couch_db_engine:get_compacted_seq(Db).
-
-get_compactor_pid(#db{compactor_pid = Pid}) ->
-    Pid.
-
-get_compactor_pid_sync(#db{main_pid=Pid}=Db) ->
-    case gen_server:call(Pid, compactor_pid, infinity) of
-        CPid when is_pid(CPid) ->
-            CPid;
-        _ ->
-            nil
-    end.
-
-get_db_info(Db) ->
-    #db{
-        name = Name,
-        compactor_pid = Compactor,
-        instance_start_time = StartTime,
-        committed_update_seq = CommittedUpdateSeq
-    } = Db,
-    {ok, DocCount} = get_doc_count(Db),
-    {ok, DelDocCount} = get_del_doc_count(Db),
-    SizeInfo = couch_db_engine:get_size_info(Db),
-    DiskVersion = couch_db_engine:get_disk_version(Db),
-    Uuid = case get_uuid(Db) of
-        undefined -> null;
-        Uuid0 -> Uuid0
-    end,
-    CompactedSeq = case get_compacted_seq(Db) of
-        undefined -> null;
-        Else1 -> Else1
-    end,
-    Props = case couch_db_engine:get_props(Db) of
-        undefined -> null;
-        Else2 -> {Else2}
-    end,
-    InfoList = [
-        {db_name, Name},
-        {engine, couch_db_engine:get_engine(Db)},
-        {doc_count, DocCount},
-        {doc_del_count, DelDocCount},
-        {update_seq, get_update_seq(Db)},
-        {purge_seq, couch_db_engine:get_purge_seq(Db)},
-        {compact_running, Compactor /= nil},
-        {sizes, {SizeInfo}},
-        {instance_start_time, StartTime},
-        {disk_format_version, DiskVersion},
-        {committed_update_seq, CommittedUpdateSeq},
-        {compacted_seq, CompactedSeq},
-        {props, Props},
-        {uuid, Uuid}
-    ],
-    {ok, InfoList}.
-
-get_partition_info(#db{} = Db, Partition) when is_binary(Partition) ->
-    Info = couch_db_engine:get_partition_info(Db, Partition),
-    {ok, Info};
-get_partition_info(_Db, _Partition) ->
-    throw({bad_request, <<"`partition` is not valid">>}).
-
-
-get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) ->
-    DDocId = couch_util:normalize_ddoc_id(DDocId0),
-    DbName = mem3:dbname(ShardDbName),
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(fabric:open_doc(DbName, DDocId, []))
-    end),
-    receive {'DOWN', Ref, _, _, Response} ->
-        Response
-    end;
-get_design_doc(#db{} = Db, DDocId0) ->
-    DDocId = couch_util:normalize_ddoc_id(DDocId0),
-    couch_db:open_doc_int(Db, DDocId, [ejson_body]).
-
-get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
-    DbName = mem3:dbname(ShardDbName),
-    {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
-    receive {'DOWN', Ref, _, _, Response} ->
-        Response
-    end;
-get_design_docs(#db{} = Db) ->
-    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
-    {ok, Docs} = fold_design_docs(Db, FoldFun, [], []),
-    {ok, lists:reverse(Docs)}.
-
-get_design_doc_count(#db{} = Db) ->
-    FoldFun = fun(_, Acc) -> {ok, Acc + 1} end,
-    fold_design_docs(Db, FoldFun, 0, []).
-
-check_is_admin(#db{user_ctx=UserCtx}=Db) ->
-    case is_admin(Db) of
-        true -> ok;
-        false ->
-            Reason = <<"You are not a db or server admin.">>,
-            throw_security_error(UserCtx, Reason)
-    end.
-
-check_is_member(#db{user_ctx=UserCtx}=Db) ->
-    case is_member(Db) of
-        true -> ok;
-        false -> throw_security_error(UserCtx)
-    end.
-
-is_admin(#db{user_ctx=UserCtx}=Db) ->
-    case couch_db_plugin:check_is_admin(Db) of
-        true -> true;
-        false ->
-            {Admins} = get_admins(Db),
-            is_authorized(UserCtx, Admins)
-    end.
-
-is_member(#db{user_ctx=UserCtx}=Db) ->
-    case is_admin(Db) of
-        true -> true;
-        false ->
-            case is_public_db(Db) of
-                true -> true;
-                false ->
-                    {Members} = get_members(Db),
-                    is_authorized(UserCtx, Members)
-            end
-    end.
-
-is_public_db(#db{}=Db) ->
-    {Members} = get_members(Db),
-    Names = couch_util:get_value(<<"names">>, Members, []),
-    Roles = couch_util:get_value(<<"roles">>, Members, []),
-    Names =:= [] andalso Roles =:= [].
-
-is_authorized(#user_ctx{name=UserName,roles=UserRoles}, Security) ->
-    Names = couch_util:get_value(<<"names">>, Security, []),
-    Roles = couch_util:get_value(<<"roles">>, Security, []),
-    case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
-        true -> true;
-        false -> check_security(names, UserName, Names)
-    end.
-
-check_security(roles, [], _) ->
-    false;
-check_security(roles, UserRoles, Roles) ->
-    UserRolesSet = ordsets:from_list(UserRoles),
-    RolesSet = ordsets:from_list(Roles),
-    not ordsets:is_disjoint(UserRolesSet, RolesSet);
-check_security(names, _, []) ->
-    false;
-check_security(names, null, _) ->
-    false;
-check_security(names, UserName, Names) ->
-    lists:member(UserName, Names).
-
-throw_security_error(#user_ctx{name=null}=UserCtx) ->
-    Reason = <<"You are not authorized to access this db.">>,
-    throw_security_error(UserCtx, Reason);
-throw_security_error(#user_ctx{name=_}=UserCtx) ->
-    Reason = <<"You are not allowed to access this db.">>,
-    throw_security_error(UserCtx, Reason).
-throw_security_error(#user_ctx{}=UserCtx, Reason) ->
-    Error = security_error_type(UserCtx),
-    throw({Error, Reason}).
-
-security_error_type(#user_ctx{name=null}) ->
-    unauthorized;
-security_error_type(#user_ctx{name=_}) ->
-    forbidden.
-
-
-get_admins(#db{security=SecProps}) ->
-    couch_util:get_value(<<"admins">>, SecProps, {[]}).
-
-get_members(#db{security=SecProps}) ->
-    % we fall back to readers here for backwards compatibility
-    couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})).
-
-get_security(#db{security=SecProps}) ->
-    {SecProps};
-get_security(?OLD_DB_REC = Db) ->
-    {?OLD_DB_SECURITY(Db)}.
-
-set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
-    check_is_admin(Db),
-    ok = validate_security_object(NewSecProps),
-    gen_server:call(Pid, {set_security, NewSecProps}, infinity);
-set_security(_, _) ->
-    throw(bad_request).
-
-set_user_ctx(#db{} = Db, UserCtx) ->
-    {ok, Db#db{user_ctx = UserCtx}}.
-
-validate_security_object(SecProps) ->
-    Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
-    % we fall back to readers here for backwards compatibility
-    Members = couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})),
-    ok = validate_names_and_roles(Admins),
-    ok = validate_names_and_roles(Members),
-    ok.
-
-% validate user input
-validate_names_and_roles({Props}) when is_list(Props) ->
-    case couch_util:get_value(<<"names">>, Props, []) of
-    Ns when is_list(Ns) ->
-            [throw("names must be a JSON list of strings") || N <- Ns, not is_binary(N)],
-            Ns;
-    _ ->
-        throw("names must be a JSON list of strings")
-    end,
-    case couch_util:get_value(<<"roles">>, Props, []) of
-    Rs when is_list(Rs) ->
-        [throw("roles must be a JSON list of strings") || R <- Rs, not is_binary(R)],
-        Rs;
-    _ ->
-        throw("roles must be a JSON list of strings")
-    end,
-    ok;
-validate_names_and_roles(_) ->
-    throw("admins or members must be a JSON list of strings").
-
-get_revs_limit(#db{} = Db) ->
-    couch_db_engine:get_revs_limit(Db).
-
-set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
-set_revs_limit(_Db, _Limit) ->
-    throw(invalid_revs_limit).
-
-name(#db{name=Name}) ->
-    Name;
-name(?OLD_DB_REC = Db) ->
-    ?OLD_DB_NAME(Db).
-
-
-validate_docid(#db{} = Db, DocId) when is_binary(DocId) ->
-    couch_doc:validate_docid(DocId, name(Db)),
-    case is_partitioned(Db) of
-        true ->
-            couch_partition:validate_docid(DocId);
-        false ->
-            ok
-    end.
-
-
-doc_from_json_obj_validate(#db{} = Db, DocJson) ->
-    Doc = couch_doc:from_json_obj_validate(DocJson, name(Db)),
-    {Props} = DocJson,
-    case couch_util:get_value(<<"_id">>, Props) of
-        DocId when is_binary(DocId) ->
-            % Only validate the docid if it was provided
-            validate_docid(Db, DocId);
-        _ ->
-            ok
-    end,
-    Doc.
-
-
-update_doc(Db, Doc, Options) ->
-    update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(Db, Doc, Options, UpdateType) ->
-    case update_docs(Db, [Doc], Options, UpdateType) of
-    {ok, [{ok, NewRev}]} ->
-        {ok, NewRev};
-    {ok, [{{_Id, _Rev}, Error}]} ->
-        throw(Error);
-    {ok, [Error]} ->
-        throw(Error);
-    {ok, []} ->
-        % replication success
-        {Pos, [RevId | _]} = Doc#doc.revs,
-        {ok, {Pos, RevId}}
-    end.
-
-update_docs(Db, Docs) ->
-    update_docs(Db, Docs, []).
-
-% group_alike_docs groups the sorted documents into sublist buckets, by id.
-% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
-group_alike_docs(Docs) ->
-    % Here we're ensuring that our doc sort is stable so that
-    % if we have duplicate docids we don't have to worry about the
-    % behavior of lists:sort/2, which isn't documented anywhere as
-    % being stable.
-    WithPos = lists:zip(Docs, lists:seq(1, length(Docs))),
-    SortFun = fun({D1, P1}, {D2, P2}) -> {D1#doc.id, P1} =< {D2#doc.id, P2} end,
-    SortedDocs = [D || {D, _} <- lists:sort(SortFun, WithPos)],
-    group_alike_docs(SortedDocs, []).
-
-group_alike_docs([], Buckets) ->
-    lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc|Rest], []) ->
-    group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
-    [#doc{id=BucketId}|_] = Bucket,
-    case Doc#doc.id == BucketId of
-    true ->
-        % add to existing bucket
-        group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
-    false ->
-        % add to new bucket
-       group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
-    end.
-
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) ->
-    case catch check_is_admin(Db) of
-        ok -> validate_ddoc(Db, Doc);
-        Error -> Error
-    end;
-validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) ->
-    ValidationFuns = load_validation_funs(Db),
-    validate_doc_update(Db#db{validate_doc_funs=ValidationFuns}, Doc, Fun);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(Db, Doc, GetDiskDocFun) ->
-    case get(io_priority) of
-        {internal_repl, _} ->
-            ok;
-        _ ->
-            validate_doc_update_int(Db, Doc, GetDiskDocFun)
-    end.
-
-validate_ddoc(Db, DDoc) ->
-    try
-        ok = couch_mrview:validate(Db, couch_doc:with_ejson_body(DDoc))
-    catch
-        throw:{invalid_design_doc, Reason} ->
-            {bad_request, invalid_design_doc, Reason};
-        throw:{compilation_error, Reason} ->
-            {bad_request, compilation_error, Reason};
-        throw:Error ->
-            Error
-    end.
-
-validate_doc_update_int(Db, Doc, GetDiskDocFun) ->
-    Fun = fun() ->
-        DiskDoc = GetDiskDocFun(),
-        JsonCtx = couch_util:json_user_ctx(Db),
-        SecObj = get_security(Db),
-        try
-            [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
-                ok -> ok;
-                Error -> throw(Error)
-             end || Fun <- Db#db.validate_doc_funs],
-            ok
-        catch
-            throw:Error ->
-                Error
-        end
-    end,
-    couch_stats:update_histogram([couchdb, query_server, vdu_process_time],
-                                 Fun).
-
-
-% to be safe, spawn a middleman here
-load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, Funs}} ->
-            gen_server:cast(Pid, {load_validation_funs, Funs}),
-            Funs;
-        {'DOWN', Ref, _, _, {database_does_not_exist, _StackTrace}} ->
-            ok = couch_server:close_db_if_idle(Db#db.name),
-            erlang:error(database_does_not_exist);
-        {'DOWN', Ref, _, _, Reason} ->
-            couch_log:error("could not load validation funs ~p", [Reason]),
-            throw(internal_server_error)
-    end;
-load_validation_funs(#db{main_pid=Pid}=Db) ->
-    {ok, DDocInfos} = get_design_docs(Db),
-    OpenDocs = fun
-        (#full_doc_info{}=D) ->
-            {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
-            Doc
-    end,
-    DDocs = lists:map(OpenDocs, DDocInfos),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    gen_server:cast(Pid, {load_validation_funs, Funs}),
-    Funs.
-
-reload_validation_funs(#db{} = Db) ->
-    gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}).
-
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
-        OldFullDocInfo, LeafRevsDict, AllowConflict) ->
-    case Revs of
-    [PrevRev|_] ->
-        case dict:find({RevStart, PrevRev}, LeafRevsDict) of
-        {ok, {#leaf{deleted=Deleted, ptr=DiskSp}, DiskRevs}} ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
-                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
-            false ->
-                LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
-                {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
-            end;
-        error when AllowConflict ->
-            couch_doc:merge_stubs(Doc, #doc{}), % will generate error if
-                                                        % there are stubs
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        error ->
-            {conflict, Doc}
-        end;
-    [] ->
-        % new doc, and we have existing revs.
-        % reuse existing deleted doc
-        if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        true ->
-            {conflict, Doc}
-        end
-    end.
-
-
-
-prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
-        AccFatalErrors) ->
-    AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
-    {AccPrepped2, AccFatalErrors};
-prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    % no existing revs are known,
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-            false -> ok
-            end,
-            case Revs of
-            {0, []} ->
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[Doc | AccBucket], AccErrors2};
-                Error ->
-                    {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]}
-                end;
-            _ ->
-                % old revs specified but none exist, a conflict
-                {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3);
-prep_and_validate_updates(Db, [DocBucket|RestBuckets],
-        [#full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    Leafs = couch_key_tree:get_all_leafs(OldRevTree),
-    LeafRevsDict = dict:from_list([
-        {{Start, RevId}, {Leaf, Revs}} ||
-        {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
-    ]),
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun(Doc, {Docs2Acc, AccErrors2}) ->
-            case prep_and_validate_update(Db, Doc, OldFullDocInfo,
-                    LeafRevsDict, AllowConflict) of
-            {ok, Doc2} ->
-                {[Doc2 | Docs2Acc], AccErrors2};
-            {Error, _} ->
-                % Record the error
-                {Docs2Acc, [{doc_tag(Doc), Error} |AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3).
-
-
-update_docs(Db, Docs, Options) ->
-    update_docs(Db, Docs, Options, interactive_edit).
-
-
-prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
-    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
-            {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
-    AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
-    {AccPrepped2, lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
-    case OldInfo of
-    not_found ->
-        {ValidatedBucket, AccErrors3} = lists:foldl(
-            fun(Doc, {AccPrepped2, AccErrors2}) ->
-                case couch_doc:has_stubs(Doc) of
-                true ->
-                    couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-                false -> ok
-                end,
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[Doc | AccPrepped2], AccErrors2};
-                Error ->
-                    {AccPrepped2, [{Doc, Error} | AccErrors2]}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
-    #full_doc_info{rev_tree=OldTree} ->
-        OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
-        OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
-        NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
-        NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
-        Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
-        LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
-        {ValidatedBucket, AccErrors3} =
-        lists:foldl(
-            fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
-                IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
-                case dict:find({Pos, RevId}, LeafRevsFullDict) of
-                {ok, {Start, Path}} when not IsOldLeaf ->
-                    % our unflushed doc is a leaf node. Go back on the path
-                    % to find the previous rev that's on disk.
-
-                    LoadPrevRevFun = fun() ->
-                                make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
-                            end,
-
-                    case couch_doc:has_stubs(Doc) of
-                    true ->
-                        DiskDoc = case LoadPrevRevFun() of
-                            #doc{} = DiskDoc0 ->
-                                DiskDoc0;
-                            _ ->
-                                % Force a missing_stub exception
-                                couch_doc:merge_stubs(Doc, #doc{})
-                        end,
-                        Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                        GetDiskDocFun = fun() -> DiskDoc end;
-                    false ->
-                        Doc2 = Doc,
-                        GetDiskDocFun = LoadPrevRevFun
-                    end,
-
-                    case validate_doc_update(Db, Doc2, GetDiskDocFun) of
-                    ok ->
-                        {[Doc2 | AccValidated], AccErrors2};
-                    Error ->
-                        {AccValidated, [{Doc, Error} | AccErrors2]}
-                    end;
-                _ ->
-                    % This doc isn't a leaf, or it already exists in the tree.
-                    % Ignore it but consider it a success.
-                    {AccValidated, AccErrors2}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
-                [ValidatedBucket | AccPrepped], AccErrors3)
-    end.
-
-
-
-new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) ->
-    DigestedAtts = lists:foldl(fun(Att, Acc) ->
-        [N, T, M] = couch_att:fetch([name, type, md5], Att),
-        case M == <<>> of
-            true -> Acc;
-            false -> [{N, T, M} | Acc]
-        end
-    end, [], Atts),
-    case DigestedAtts of
-        Atts2 when length(Atts) =/= length(Atts2) ->
-            % We must have old style non-md5 attachments
-            ?l2b(integer_to_list(couch_util:rand32()));
-        Atts2 ->
-            OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
-            couch_hash:md5_hash(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]))
-    end.
-
-new_revs([], OutBuckets, IdRevsAcc) ->
-    {lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
-    {NewBucket, IdRevsAcc3} = lists:mapfoldl(
-        fun(#doc{revs={Start, RevIds}}=Doc, IdRevsAcc2)->
-        NewRevId = new_revid(Doc),
-        {Doc#doc{revs={Start+1, [NewRevId | RevIds]}},
-            [{doc_tag(Doc), {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
-    end, IdRevsAcc, Bucket),
-    new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts=Atts}=Doc) ->
-    lists:foldl(fun(Att, Names) ->
-        Name = couch_att:fetch(name, Att),
-        case ordsets:is_element(Name, Names) of
-            true -> throw({bad_request, <<"Duplicate attachments">>});
-            false -> ordsets:add_element(Name, Names)
-        end
-    end, ordsets:new(), Atts),
-    Doc.
-
-tag_docs([]) ->
-    [];
-tag_docs([#doc{meta=Meta}=Doc | Rest]) ->
-    [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-
-doc_tag(#doc{meta=Meta}) ->
-    case lists:keyfind(ref, 1, Meta) of
-        {ref, Ref} when is_reference(Ref) -> Ref;
-        false -> throw(doc_not_tagged);
-        Else -> throw({invalid_doc_tag, Else})
-    end.
-
-update_docs(Db, Docs0, Options, replicated_changes) ->
-    Docs = tag_docs(Docs0),
-
-    PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
-        prep_and_validate_replicated_updates(Db0, DocBuckets0,
-            ExistingDocInfos, [], [])
-    end,
-
-    {ok, DocBuckets, NonRepDocs, DocErrors}
-        = before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
-
-    DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc))
-            || Doc <- Bucket] || Bucket <- DocBuckets],
-    {ok, _} = write_and_commit(Db, DocBuckets2,
-        NonRepDocs, [merge_conflicts | Options]),
-    {ok, DocErrors};
-
-update_docs(Db, Docs0, Options, interactive_edit) ->
-    Docs = tag_docs(Docs0),
-
-    AllOrNothing = lists:member(all_or_nothing, Options),
-    PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
-        prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos,
-            AllOrNothing, [], [])
-    end,
-
-    {ok, DocBuckets, NonRepDocs, DocErrors}
-        = before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
-
-    if (AllOrNothing) and (DocErrors /= []) ->
-        RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
-        {aborted, lists:map(fun({Ref, Error}) ->
-            #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict),
-            case {Start, RevIds} of
-                {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
-                {0, []} -> {{Id, {0, <<>>}}, Error}
-            end
-        end, DocErrors)};
-    true ->
-        Options2 = if AllOrNothing -> [merge_conflicts];
-                true -> [] end ++ Options,
-        DocBuckets2 = [[
-                doc_flush_atts(Db, set_new_att_revpos(
-                        check_dup_atts(Doc)))
-                || Doc <- B] || B <- DocBuckets],
-        {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
-
-        {ok, CommitResults} = write_and_commit(Db, DocBuckets3,
-            NonRepDocs, Options2),
-
-        ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
-            dict:store(Key, Resp, ResultsAcc)
-        end, dict:from_list(IdRevs), CommitResults ++ DocErrors),
-        {ok, lists:map(fun(Doc) ->
-            dict:fetch(doc_tag(Doc), ResultsDict)
-        end, Docs)}
-    end.
-
-% Returns the first available document on disk. Input list is a full rev path
-% for the doc.
-make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
-    nil;
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted=IsDel, ptr=Sp}} |_]=DocPath) ->
-    Revs = [Rev || {Rev, _} <- DocPath],
-    make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
-
-collect_results_with_metrics(Pid, MRef, []) ->
-    Begin = os:timestamp(),
-    try
-        collect_results(Pid, MRef, [])
-    after
-        ResultsTime = timer:now_diff(os:timestamp(), Begin) div 1000,
-        couch_stats:update_histogram(
-            [couchdb, collect_results_time],
-            ResultsTime
-        )
-    end.
-
-collect_results(Pid, MRef, ResultsAcc) ->
-    receive
-    {result, Pid, Result} ->
-        collect_results(Pid, MRef, [Result | ResultsAcc]);
-    {done, Pid} ->
-        {ok, ResultsAcc};
-    {retry, Pid} ->
-        retry;
-    {'DOWN', MRef, _, _, Reason} ->
-        exit(Reason)
-    end.
-
-write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
-        NonRepDocs, Options) ->
-    DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
-    MergeConflicts = lists:member(merge_conflicts, Options),
-    MRef = erlang:monitor(process, Pid),
-    try
-        Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts},
-        case collect_results_with_metrics(Pid, MRef, []) of
-        {ok, Results} -> {ok, Results};
-        retry ->
-            % This can happen if the db file we wrote to was swapped out by
-            % compaction. Retry by reopening the db and writing to the current file
-            {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
-            DocBuckets2 = [
-                [doc_flush_atts(Db2, Doc) || Doc <- Bucket] ||
-                Bucket <- DocBuckets1
-            ],
-            % We only retry once
-            DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
-            close(Db2),
-            Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts},
-            case collect_results_with_metrics(Pid, MRef, []) of
-            {ok, Results} -> {ok, Results};
-            retry -> throw({update_error, compaction_retry})
-            end
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-
-prepare_doc_summaries(Db, BucketList) ->
-    [lists:map(
-        fun(#doc{body = Body, atts = Atts} = Doc0) ->
-            DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
-            {ok, SizeInfo} = couch_att:size_info(Atts),
-            AttsStream = case Atts of
-                [Att | _] ->
-                    {stream, StreamEngine} = couch_att:fetch(data, Att),
-                    StreamEngine;
-                [] ->
-                    nil
-            end,
-            Doc1 = Doc0#doc{
-                atts = DiskAtts,
-                meta = [
-                    {size_info, SizeInfo},
-                    {atts_stream, AttsStream},
-                    {ejson_size, couch_ejson_size:encoded_size(Body)}
-                ] ++ Doc0#doc.meta
-            },
-            couch_db_engine:serialize_doc(Db, Doc1)
-        end,
-        Bucket) || Bucket <- BucketList].
-
-
-before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) ->
-    increment_stat(Db, [couchdb, database_writes]),
-
-    % Separate _local docs from normal docs
-    IsLocal = fun
-        (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
-        (_) -> false
-    end,
-    {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
-
-    BucketList = group_alike_docs(Docs2),
-
-    DocBuckets = lists:map(fun(Bucket) ->
-        lists:map(fun(Doc) ->
-            DocWithBody = couch_doc:with_ejson_body(Doc),
-            couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
-        end, Bucket)
-    end, BucketList),
-
-    ValidatePred = fun
-        (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
-        (#doc{atts = Atts}) -> Atts /= []
-    end,
-
-    case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of
-        true ->
-            % lookup the doc by id and get the most recent
-            Ids = [Id || [#doc{id = Id} | _] <- DocBuckets],
-            ExistingDocs = get_full_doc_infos(Db, Ids),
-            {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs),
-             % remove empty buckets
-            DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []],
-            {ok, DocBuckets3, NonRepDocs, DocErrors};
-        false ->
-            {ok, DocBuckets, NonRepDocs, []}
-    end.
-
-
-set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
-    Atts = lists:map(
-        fun(Att) ->
-            case couch_att:fetch(data, Att) of
-                % already committed to disk, don't set new rev
-                {stream, _} -> Att;
-                {Fd, _} when is_pid(Fd) -> Att;
-                % write required so update RevPos
-                _ -> couch_att:store(revpos, RevPos+1, Att)
-            end
-        end, Atts0),
-    Doc#doc{atts = Atts}.
-
-
-doc_flush_atts(Db, Doc) ->
-    Doc#doc{atts=[couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
-
-
-compressible_att_type(MimeType) when is_binary(MimeType) ->
-    compressible_att_type(?b2l(MimeType));
-compressible_att_type(MimeType) ->
-    TypeExpList = re:split(
-        config:get("attachments", "compressible_types", ""),
-        "\\s*,\\s*",
-        [{return, list}]
-    ),
-    lists:any(
-        fun(TypeExp) ->
-            Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
-                "(?:\\s*;.*?)?\\s*", $$],
-            re:run(MimeType, Regexp, [caseless]) =/= nomatch
-        end,
-        [T || T <- TypeExpList, T /= []]
-    ).
-
-% From RFC 2616 3.6.1 - Chunked Transfer Coding
-%
-%   In other words, the origin server is willing to accept
-%   the possibility that the trailer fields might be silently
-%   discarded along the path to the client.
-%
-% I take this to mean that if "Trailers: Content-MD5\r\n"
-% is present in the request, but there is no Content-MD5
-% trailer, we're free to ignore this inconsistency and
-% pretend that no Content-MD5 exists.
-with_stream(Db, Att, Fun) ->
-    [InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
-    BufferSize = list_to_integer(
-        config:get("couchdb", "attachment_stream_buffer_size", "4096")),
-    Options = case (Enc =:= identity) andalso compressible_att_type(Type) of
-        true ->
-            CompLevel = list_to_integer(
-                config:get("attachments", "compression_level", "0")
-            ),
-            [
-                {buffer_size, BufferSize},
-                {encoding, gzip},
-                {compression_level, CompLevel}
-            ];
-        _ ->
-            [{buffer_size, BufferSize}]
-    end,
-    {ok, OutputStream} = open_write_stream(Db, Options),
-    ReqMd5 = case Fun(OutputStream) of
-        {md5, FooterMd5} ->
-            case InMd5 of
-                md5_in_footer -> FooterMd5;
-                _ -> InMd5
-            end;
-        _ ->
-            InMd5
-    end,
-    {StreamEngine, Len, IdentityLen, Md5, IdentityMd5} =
-        couch_stream:close(OutputStream),
-    couch_util:check_md5(IdentityMd5, ReqMd5),
-    {AttLen, DiskLen, NewEnc} = case Enc of
-    identity ->
-        case {Md5, IdentityMd5} of
-        {Same, Same} ->
-            {Len, IdentityLen, identity};
-        _ ->
-            {Len, IdentityLen, gzip}
-        end;
-    gzip ->
-        case couch_att:fetch([att_len, disk_len], Att) of
-            [AL, DL] when AL =:= undefined orelse DL =:= undefined ->
-                % Compressed attachment uploaded through the standalone API.
-                {Len, Len, gzip};
-            [AL, DL] ->
-                % This case is used for efficient push-replication, where a
-                % compressed attachment is located in the body of multipart
-                % content-type request.
-                {AL, DL, gzip}
-        end
-    end,
-    couch_att:store([
-        {data, {stream, StreamEngine}},
-        {att_len, AttLen},
-        {disk_len, DiskLen},
-        {md5, Md5},
-        {encoding, NewEnc}
-    ], Att).
-
-
-open_write_stream(Db, Options) ->
-    couch_db_engine:open_write_stream(Db, Options).
-
-
-open_read_stream(Db, AttState) ->
-    couch_db_engine:open_read_stream(Db, AttState).
-
-
-is_active_stream(Db, StreamEngine) ->
-    couch_db_engine:is_active_stream(Db, StreamEngine).
-
-
-calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
-    Seq;
-calculate_start_seq(Db, Node, {Seq, Uuid}) ->
-    % Treat the current node as the epoch node
-    calculate_start_seq(Db, Node, {Seq, Uuid, Node});
-calculate_start_seq(Db, _Node, {Seq, {split, Uuid}, EpochNode}) ->
-    case is_owner(EpochNode, Seq, get_epochs(Db)) of
-        true ->
-            % Find last replicated sequence from split source to target
-            mem3_rep:find_split_target_seq(Db, EpochNode, Uuid, Seq);
-        false ->
-            couch_log:warning("~p calculate_start_seq not owner "
-                "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
-                [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]),
-            0
-    end;
-calculate_start_seq(Db, _Node, {Seq, Uuid, EpochNode}) ->
-    case is_prefix(Uuid, get_uuid(Db)) of
-        true ->
-            case is_owner(EpochNode, Seq, get_epochs(Db)) of
-                true -> Seq;
-                false ->
-                    couch_log:warning("~p calculate_start_seq not owner "
-                        "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
-                        [?MODULE, Db#db.name, Seq, Uuid, EpochNode,
-                            get_epochs(Db)]),
-                    0
-            end;
-        false ->
-            couch_log:warning("~p calculate_start_seq uuid prefix mismatch "
-                "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
-                [?MODULE, Db#db.name, Seq, Uuid, EpochNode]),
-            %% The file was rebuilt, most likely in a different
-            %% order, so rewind.
-            0
-    end;
-calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
-    case is_prefix(Uuid, couch_db:get_uuid(Db)) of
-        true ->
-            try
-                start_seq(get_epochs(Db), OriginalNode, Seq)
-            catch throw:epoch_mismatch ->
-                couch_log:warning("~p start_seq duplicate uuid on node: ~p "
-                    "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
-                    [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]),
-                0
-            end;
-        false ->
-            {replace, OriginalNode, Uuid, Seq}
-    end.
-
-
-validate_epochs(Epochs) ->
-    %% Assert uniqueness.
-    case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
-        true  -> ok;
-        false -> erlang:error(duplicate_epoch)
-    end,
-    %% Assert order.
-    case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
-        true  -> ok;
-        false -> erlang:error(epoch_order)
-    end.
-
-
-is_prefix(Pattern, Subject) ->
-     binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
-
-
-is_owner(Node, Seq, Epochs) ->
-    Node =:= owner_of(Epochs, Seq).
-
-
-owner_of(Db, Seq) when not is_list(Db) ->
-    owner_of(get_epochs(Db), Seq);
-owner_of([], _Seq) ->
-    undefined;
-owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq ->
-    EpochNode;
-owner_of([_ | Rest], Seq) ->
-    owner_of(Rest, Seq).
-
-
-start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq ->
-    %% OrigNode is the owner of the Seq so we can safely stream from there
-    Seq;
-start_seq([{_, NewSeq}, {OrigNode, _} | _], OrigNode, Seq) when Seq > NewSeq ->
-    %% We transferred this file before Seq was written on OrigNode, so we need
-    %% to stream from the beginning of the next epoch. Note that it is _not_
-    %% necessary for the current node to own the epoch beginning at NewSeq
-    NewSeq;
-start_seq([_ | Rest], OrigNode, Seq) ->
-    start_seq(Rest, OrigNode, Seq);
-start_seq([], _OrigNode, _Seq) ->
-    throw(epoch_mismatch).
-
-
-fold_docs(Db, UserFun, UserAcc) ->
-    fold_docs(Db, UserFun, UserAcc, []).
-
-fold_docs(Db, UserFun, UserAcc, Options) ->
-    couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_local_docs(Db, UserFun, UserAcc, Options) ->
-    couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_design_docs(Db, UserFun, UserAcc, Options1) ->
-    Options2 = set_design_doc_keys(Options1),
-    couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc) ->
-    fold_changes(Db, StartSeq, UserFun, UserAcc, []).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
-    couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts).
-
-
-fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) ->
-    fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []).
-
-
-fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts) ->
-    couch_db_engine:fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts).
-
-
-count_changes_since(Db, SinceSeq) ->
-    couch_db_engine:count_changes_since(Db, SinceSeq).
-
-
-%%% Internal functions %%%
-open_doc_revs_int(Db, IdRevs, Options) ->
-    Ids = [Id || {Id, _Revs} <- IdRevs],
-    LookupResults = get_full_doc_infos(Db, Ids),
-    lists:zipwith(
-        fun({Id, Revs}, Lookup) ->
-            case Lookup of
-            #full_doc_info{rev_tree=RevTree} ->
-                {FoundRevs, MissingRevs} =
-                case Revs of
-                all ->
-                    {couch_key_tree:get_all_leafs(RevTree), []};
-                _ ->
-                    case lists:member(latest, Options) of
-                    true ->
-                        couch_key_tree:get_key_leafs(RevTree, Revs);
-                    false ->
-                        couch_key_tree:get(RevTree, Revs)
-                    end
-                end,
-                FoundResults =
-                lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
-                    case Value of
-                    ?REV_MISSING ->
-                        % we have the rev in our list but know nothing about it
-                        {{not_found, missing}, {Pos, Rev}};
-                    #leaf{deleted=IsDeleted, ptr=SummaryPtr} ->
-                        {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
-                    end
-                end, FoundRevs),
-                Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
-                {ok, Results};
-            not_found when Revs == all ->
-                {ok, []};
-            not_found ->
-                {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
-            end
-        end,
-        IdRevs, LookupResults).
-
-open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
-    case couch_db_engine:open_local_docs(Db, [Id]) of
-    [#doc{} = Doc] ->
-        apply_open_options({ok, Doc}, Options);
-    [not_found] ->
-        {not_found, missing}
-    end;
-open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
-    #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
-    Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
-    apply_open_options(
-       {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
-open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
-    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
-        DocInfo = couch_doc:to_doc_info(FullDocInfo),
-    {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
-    Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
-    apply_open_options(
-        {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
-open_doc_int(Db, Id, Options) ->
-    case get_full_doc_info(Db, Id) of
-    #full_doc_info{} = FullDocInfo ->
-        open_doc_int(Db, FullDocInfo, Options);
-    not_found ->
-        {not_found, missing}
-    end.
-
-doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
-    case lists:member(revs_info, Options) of
-    false -> [];
-    true ->
-        {[{Pos, RevPath}],[]} =
-            couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
-        [{revs_info, Pos, lists:map(
-            fun({Rev1, ?REV_MISSING}) ->
-                {Rev1, missing};
-            ({Rev1, Leaf}) ->
-                case Leaf#leaf.deleted of
-                true ->
-                    {Rev1, deleted};
-                false ->
-                    {Rev1, available}
-                end
-            end, RevPath)}]
-    end ++
-    case lists:member(conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
-        [] -> [];
-        ConflictRevs -> [{conflicts, ConflictRevs}]
-        end
-    end ++
-    case lists:member(deleted_conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
-        [] -> [];
-        DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
-        end
-    end ++
-    case lists:member(local_seq, Options) of
-    false -> [];
-    true -> [{local_seq, Seq}]
-    end.
-
-
-make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
-    #doc{
-        id = Id,
-        revs = RevisionPath,
-        body = [],
-        atts = [],
-        deleted = Deleted
-    };
-make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) ->
-    RevsLimit = get_revs_limit(Db),
-    Doc0 = couch_db_engine:read_doc_body(Db, #doc{
-        id = Id,
-        revs = {Pos, lists:sublist(Revs, 1, RevsLimit)},
-        body = Bp,
-        deleted = Deleted
-    }),
-    Doc1 = case Doc0#doc.atts of
-        BinAtts when is_binary(BinAtts) ->
-            Doc0#doc{
-                atts = couch_compress:decompress(BinAtts)
-            };
-        ListAtts when is_list(ListAtts) ->
-            Doc0
-    end,
-    after_doc_read(Db, Doc1#doc{
-        atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts]
-    }).
-
-
-after_doc_read(#db{} = Db, Doc) ->
-    DocWithBody = couch_doc:with_ejson_body(Doc),
-    couch_db_plugin:after_doc_read(Db, DocWithBody).
-
-increment_stat(#db{options = Options}, Stat) ->
-    case lists:member(sys_db, Options) of
-    true ->
-        ok;
-    false ->
-        couch_stats:increment_counter(Stat)
-    end.
-
--spec normalize_dbname(list() | binary()) -> binary().
-
-normalize_dbname(DbName) when is_list(DbName) ->
-    normalize_dbname(list_to_binary(DbName));
-normalize_dbname(DbName) when is_binary(DbName) ->
-    mem3:dbname(couch_util:drop_dot_couch_ext(DbName)).
-
-
--spec dbname_suffix(list() | binary()) -> binary().
-
-dbname_suffix(DbName) ->
-    filename:basename(normalize_dbname(DbName)).
-
-
-validate_dbname(DbName) when is_list(DbName) ->
-    validate_dbname(?l2b(DbName));
-validate_dbname(DbName) when is_binary(DbName) ->
-    Normalized = normalize_dbname(DbName),
-    couch_db_plugin:validate_dbname(
-        DbName, Normalized, fun validate_dbname_int/2).
-
-validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
-    DbNoExt = couch_util:drop_dot_couch_ext(DbName),
-    case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
-        match ->
-            ok;
-        nomatch ->
-            case is_system_db_name(Normalized) of
-                true -> ok;
-                false -> {error, {illegal_database_name, DbName}}
-            end
-    end.
-
-is_system_db_name(DbName) when is_list(DbName) ->
-    is_system_db_name(?l2b(DbName));
-is_system_db_name(DbName) when is_binary(DbName) ->
-    Normalized = normalize_dbname(DbName),
-    Suffix = filename:basename(Normalized),
-    case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
-        {<<".">>, Result} -> Result;
-        {_Prefix, false} -> false;
-        {Prefix, true} ->
-            ReOpts =  [{capture,none}, dollar_endonly],
-            re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
-    end.
-
-set_design_doc_keys(Options1) ->
-    Dir = case lists:keyfind(dir, 1, Options1) of
-        {dir, D0} -> D0;
-        _ -> fwd
-    end,
-    Options2 = set_design_doc_start_key(Options1, Dir),
-    set_design_doc_end_key(Options2, Dir).
-
-
--define(FIRST_DDOC_KEY, <<"_design/">>).
--define(LAST_DDOC_KEY, <<"_design0">>).
-
-
-set_design_doc_start_key(Options, fwd) ->
-    Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
-    Key2 = case Key1 < ?FIRST_DDOC_KEY of
-        true -> ?FIRST_DDOC_KEY;
-        false -> Key1
-    end,
-    lists:keystore(start_key, 1, Options, {start_key, Key2});
-set_design_doc_start_key(Options, rev) ->
-    Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
-    Key2 = case Key1 > ?LAST_DDOC_KEY of
-        true -> ?LAST_DDOC_KEY;
-        false -> Key1
-    end,
-    lists:keystore(start_key, 1, Options, {start_key, Key2}).
-
-
-set_design_doc_end_key(Options, fwd) ->
-    case couch_util:get_value(end_key_gt, Options) of
-        undefined ->
-            Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
-            Key2 = case Key1 > ?LAST_DDOC_KEY of
-                true -> ?LAST_DDOC_KEY;
-                false -> Key1
-            end,
-            lists:keystore(end_key, 1, Options, {end_key, Key2});
-        EKeyGT ->
-            Key2 = case EKeyGT > ?LAST_DDOC_KEY of
-                true -> ?LAST_DDOC_KEY;
-                false -> EKeyGT
-            end,
-            lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
-    end;
-set_design_doc_end_key(Options, rev) ->
-    case couch_util:get_value(end_key_gt, Options) of
-        undefined ->
-            Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
-            Key2 = case Key1 < ?FIRST_DDOC_KEY of
-                true -> ?FIRST_DDOC_KEY;
-                false -> Key1
-            end,
-            lists:keystore(end_key, 1, Options, {end_key, Key2});
-        EKeyGT ->
-            Key2 = case EKeyGT < ?FIRST_DDOC_KEY of
-                true -> ?FIRST_DDOC_KEY;
-                false -> EKeyGT
-            end,
-            lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
-    end.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
-    ok = meck:new(couch_epi, [passthrough]),
-    ok = meck:expect(couch_epi, decide, fun(_, _, _, _, _) -> no_decision end),
-    ok.
-
-teardown_all(_) ->
-    meck:unload().
-
-setup() ->
-    meck:reset([couch_epi]).
-
-teardown(_) ->
-    ok.
-
-validate_dbname_success_test_() ->
-    Cases =
-        generate_cases_with_shards("long/co$mplex-/path+/something")
-        ++ generate_cases_with_shards("something")
-        ++ lists:append(
-            [generate_cases_with_shards(?b2l(SystemDb))
-                || SystemDb <- ?SYSTEM_DATABASES]),
-    {
-        setup,
-        fun setup_all/0,
-        fun teardown_all/1,
-        {
-            foreach,
-            fun setup/0,
-            fun teardown/1,
-            [should_pass_validate_dbname(A) || {_, A} <- Cases]
-        }
-    }.
-
-validate_dbname_fail_test_() ->
-    Cases = generate_cases("_long/co$mplex-/path+/_something")
-       ++ generate_cases("_something")
-       ++ generate_cases_with_shards("long/co$mplex-/path+/_something#")
-       ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing")
-       ++ generate_cases("!abcdefg/werwej/_users")
-       ++ generate_cases_with_shards("!abcdefg/werwej/_users"),
-    {
-        setup,
-        fun setup_all/0,
-        fun teardown_all/1,
-        {
-            foreach,
-            fun setup/0,
-            fun teardown/1,
-            [should_fail_validate_dbname(A) || {_, A} <- Cases]
-        }
-    }.
-
-normalize_dbname_test_() ->
-    Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
-       ++ generate_cases_with_shards("_something"),
-    WithExpected = [{?l2b(filename:rootname(A)), B} || {A, B} <- Cases],
-    [{test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))}
-        || {Expected, Db} <- WithExpected].
-
-dbname_suffix_test_() ->
-    Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
-       ++ generate_cases_with_shards("_something"),
-    WithExpected = [{?l2b(filename:basename(Arg)), Db} || {Arg, Db} <- Cases],
-    [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
-        || {Expected, Db} <- WithExpected].
-
-is_system_db_name_test_() ->
-    Cases = lists:append([
-        generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
-            || Db <- ?SYSTEM_DATABASES]
-        ++ [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES
-    ]),
-    WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db}
-        || {Arg, Db} <- Cases],
-    [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES",
-        ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected].
-
-should_pass_validate_dbname(DbName) ->
-    {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}.
-
-should_fail_validate_dbname(DbName) ->
-    {test_name(DbName), ?_test(begin
-        Result = validate_dbname(DbName),
-        ?assertMatch({error, {illegal_database_name, _}}, Result),
-        {error, {illegal_database_name, FailedDbName}} = Result,
-        ?assertEqual(to_binary(DbName), FailedDbName),
-        ok
-    end)}.
-
-calculate_start_seq_test_() ->
-    {
-        setup,
-        fun setup_start_seq_all/0,
-        fun teardown_start_seq_all/1,
-        {
-            foreach,
-            fun setup_start_seq/0,
-            fun teardown_start_seq/1,
-            [
-                t_calculate_start_seq_uuid_mismatch(),
-                t_calculate_start_seq_is_owner(),
-                t_calculate_start_seq_not_owner(),
-                t_calculate_start_seq_raw(),
-                t_calculate_start_seq_epoch_mismatch()
-            ]
-        }
-    }.
-
-setup_start_seq_all() ->
-    meck:new(couch_db_engine, [passthrough]),
-    meck:expect(couch_db_engine, get_uuid, fun(_) -> <<"foo">> end),
-    ok = meck:expect(couch_log, warning, 2, ok),
-    Epochs = [
-        {node2, 10},
-        {node1, 1}
-    ],
-    meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
-
-teardown_start_seq_all(_) ->
-    meck:unload().
-
-setup_start_seq() ->
-    meck:reset([
-        couch_db_engine,
-        couch_log
-    ]).
-
-teardown_start_seq(_) ->
-    ok.
-
-t_calculate_start_seq_uuid_mismatch() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node2, {15, <<"baz">>}),
-        ?assertEqual(0, Seq)
-    end).
-
-t_calculate_start_seq_is_owner() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node2, {15, <<"foo">>}),
-        ?assertEqual(15, Seq)
-    end).
-
-t_calculate_start_seq_not_owner() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node1, {15, <<"foo">>}),
-        ?assertEqual(0, Seq)
-    end).
-
-t_calculate_start_seq_raw() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node1, 13),
-        ?assertEqual(13, Seq)
-    end).
-
-t_calculate_start_seq_epoch_mismatch() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        SeqIn = {replace, not_this_node, get_uuid(Db), 42},
-        Seq = calculate_start_seq(Db, node1, SeqIn),
-        ?assertEqual(0, Seq)
-    end).
-
-is_owner_test() ->
-    ?assertNot(is_owner(foo, 1, [])),
-    ?assertNot(is_owner(foo, 1, [{foo, 1}])),
-    ?assert(is_owner(foo, 2, [{foo, 1}])),
-    ?assert(is_owner(foo, 50, [{bar, 100}, {foo, 1}])),
-    ?assert(is_owner(foo, 50, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assert(is_owner(bar, 150, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assertError(duplicate_epoch, validate_epochs([{foo, 1}, {bar, 1}])),
-    ?assertError(epoch_order, validate_epochs([{foo, 100}, {bar, 200}])).
-
-to_binary(DbName) when is_list(DbName) ->
-    ?l2b(DbName);
-to_binary(DbName) when is_binary(DbName) ->
-    DbName.
-
-test_name({Expected, DbName}) ->
-    lists:flatten(io_lib:format("~p -> ~p", [DbName, Expected]));
-test_name(DbName) ->
-    lists:flatten(io_lib:format("~p", [DbName])).
-
-generate_cases_with_shards(DbName) ->
-    DbNameWithShard = add_shard(DbName),
-    DbNameWithShardAndExtension = add_shard(DbName) ++ ".couch",
-    Cases = [
-        DbName, ?l2b(DbName),
-        DbNameWithShard, ?l2b(DbNameWithShard),
-        DbNameWithShardAndExtension, ?l2b(DbNameWithShardAndExtension)
-    ],
-    [{DbName, Case} || Case <- Cases].
-
-add_shard(DbName) ->
-    "shards/00000000-3fffffff/" ++ DbName ++ ".1415960794".
-
-generate_cases(DbName) ->
-    [{DbName, DbName}, {DbName, ?l2b(DbName)}].
-
--endif.
diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl
deleted file mode 100644
index 9adc992..0000000
--- a/src/couch/src/couch_db_engine.erl
+++ /dev/null
@@ -1,1105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_engine).
-
-
--include("couch_db.hrl").
--include("couch_db_int.hrl").
-
-
--type filepath() :: iolist().
--type docid() :: binary().
--type rev() :: {non_neg_integer(), binary()}.
--type revs() :: [rev()].
--type json() :: any().
--type uuid() :: binary().
--type purge_seq() :: non_neg_integer().
-
--type doc_pair() :: {
-        #full_doc_info{} | not_found,
-        #full_doc_info{} | not_found
-    }.
-
--type doc_pairs() :: [doc_pair()].
-
--type db_open_options() :: [
-        create
-    ].
-
--type delete_options() :: [
-        {context, delete | compaction} |
-        sync
-    ].
-
--type purge_info() :: {purge_seq(), uuid(), docid(), revs()}.
--type epochs() :: [{Node::atom(), UpdateSeq::non_neg_integer()}].
--type size_info() :: [{Name::atom(), Size::non_neg_integer()}].
--type partition_info() :: [
-    {partition, Partition::binary()} |
-    {doc_count, DocCount::non_neg_integer()} |
-    {doc_del_count, DocDelCount::non_neg_integer()} |
-    {sizes, size_info()}
-].
-
--type write_stream_options() :: [
-        {buffer_size, Size::pos_integer()} |
-        {encoding, atom()} |
-        {compression_level, non_neg_integer()}
-    ].
-
--type doc_fold_options() :: [
-        {start_key, Key::any()} |
-        {end_key, Key::any()} |
-        {end_key_gt, Key::any()} |
-        {dir, fwd | rev} |
-        include_reductions |
-        include_deleted
-    ].
-
--type changes_fold_options() :: [
-        {dir, fwd | rev}
-    ].
-
--type purge_fold_options() :: [
-        {start_key, Key::any()} |
-        {end_key, Key::any()} |
-        {end_key_gt, Key::any()} |
-        {dir, fwd | rev}
-    ].
-
--type db_handle() :: any().
-
--type doc_fold_fun() :: fun((#full_doc_info{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type local_doc_fold_fun() :: fun((#doc{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type changes_fold_fun() :: fun((#doc_info{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type purge_fold_fun() :: fun((purge_info(), UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
-
-% This is called by couch_server to determine which
-% engine should be used for the given database. DbPath
-% is calculated based on the DbName and the configured
-% extension for a given engine. The first engine to
-% return true is the engine that will be used for the
-% database.
--callback exists(DbPath::filepath()) -> boolean().
-
-
-% This is called by couch_server to delete a database. It
-% is called from inside the couch_server process which
-% means that the storage engine does not have to guarantee
-% its own consistency checks when executing in this
-% context. However, since this is executed in the context
-% of couch_server, it should return relatively quickly.
--callback delete(
-            RootDir::filepath(),
-            DbPath::filepath(),
-            DelOpts::delete_options()) ->
-        ok | {error, Reason::atom()}.
-
-
-% This function can be called from multiple contexts. It
-% will either be called just before a call to delete/3 above
-% or when a compaction is cancelled which executes in the
-% context of a couch_db_updater process. It is intended to
-% remove any temporary files used during compaction that
-% may be used to recover from a failed compaction swap.
--callback delete_compaction_files(
-            RootDir::filepath(),
-            DbPath::filepath(),
-            DelOpts::delete_options()) ->
-        ok.
-
-
-% This is called from the couch_db_updater:init/1 context. As
-% such this means that it is guaranteed to only have one process
-% executing for a given DbPath argument (ie, opening a given
-% database is guaranteed to only happen in a single process).
-% However, multiple processes may be trying to open different
-% databases concurrently, so if a database requires a shared
-% resource, that resource will require concurrency control at the
-% storage engine layer.
-%
-% The returned DbHandle should be a term that can be freely
-% copied between processes and accessed concurrently. However
-% it's guaranteed that the handle will only ever be mutated
-% in a single threaded context (ie, within the couch_db_updater
-% process).
--callback init(DbPath::filepath(), db_open_options()) ->
-    {ok, DbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:terminate/2
-% and as such has the same properties as init/2. It's guaranteed
-% to be consistent for a given database but may be called by many
-% databases concurrently.
--callback terminate(Reason::any(), DbHandle::db_handle()) -> Ignored::any().
-
-
-% This is called in the context of couch_db_updater:handle_call/3
-% for any message that is unknown. It can be used to handle messages
-% from asynchronous processes like the engine's compactor if it has one.
--callback handle_db_updater_call(Msg::any(), DbHandle::db_handle()) ->
-        {reply, Resp::any(), NewDbHandle::db_handle()} |
-        {stop, Reason::any(), Resp::any(), NewDbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and has the same properties as handle_call/3.
--callback handle_db_updater_info(Msg::any(), DbHandle::db_handle()) ->
-    {noreply, NewDbHandle::db_handle()} |
-    {noreply, NewDbHandle::db_handle(), Timeout::timeout()} |
-    {stop, Reason::any(), NewDbHandle::db_handle()}.
-
-
-% These functions are called by any process opening or closing
-% a database. As such they need to be able to handle being
-% called concurrently. For example, the legacy engine uses these
-% to add monitors to the main engine process.
--callback incref(DbHandle::db_handle()) -> {ok, NewDbHandle::db_handle()}.
--callback decref(DbHandle::db_handle()) -> ok.
--callback monitored_by(DbHandle::db_handle()) -> [pid()].
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and should return the timestamp of the last activity of
-% the database. If a storage engine has no notion of activity, or the
-% value would be hard to report, it's ok to just return the
-% result of os:timestamp/0 as this will just prevent idle
-% databases from automatically closing.
--callback last_activity(DbHandle::db_handle()) -> erlang:timestamp().
-
-
-% All of the get_* functions may be called from many
-% processes concurrently.
-
-% The database should make a note of the update sequence when it
-% was last compacted. If the database doesn't need compacting it
-% can just hard code a return value of 0.
--callback get_compacted_seq(DbHandle::db_handle()) ->
-            CompactedSeq::non_neg_integer().
-
-
-% The number of documents in the database which have all leaf
-% revisions marked as deleted.
--callback get_del_doc_count(DbHandle::db_handle()) ->
-            DelDocCount::non_neg_integer().
-
-
-% This number is reported in the database info properties and
-% as such can be any JSON value.
--callback get_disk_version(DbHandle::db_handle()) -> Version::json().
-
-
-% The number of documents in the database that have one or more
-% leaf revisions not marked as deleted.
--callback get_doc_count(DbHandle::db_handle()) -> DocCount::non_neg_integer().
-
-
-% The epochs track which node owned the database starting at
-% a given update sequence. Each time a database is opened it
-% should look at the epochs. If the most recent entry is not
-% for the current node it should add an entry that will be
-% written the next time a write is performed. An entry is
-% simply a {node(), CurrentUpdateSeq} tuple.
--callback get_epochs(DbHandle::db_handle()) -> Epochs::epochs().
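
A minimal sketch of the epoch bookkeeping described above, assuming an engine
keeps its epochs as the newest-first list of {node(), UpdateSeq} tuples just
mentioned; the function name maybe_add_epoch is hypothetical:

    maybe_add_epoch(Epochs, CurrentUpdateSeq) ->
        case Epochs of
            [{Node, _} | _] when Node =:= node() ->
                % The most recent epoch already belongs to this node.
                Epochs;
            _ ->
                % Prepend an entry for this node; it should be persisted
                % on the next write.
                [{node(), CurrentUpdateSeq} | Epochs]
        end.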
-
-
-% Get the current purge sequence known to the engine. This
-% value should be updated during calls to purge_docs.
--callback get_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the oldest purge sequence known to the engine
--callback get_oldest_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the purged infos limit. This should just return the last
-% value that was passed to set_purge_infos_limit/2.
--callback get_purge_infos_limit(DbHandle::db_handle()) -> pos_integer().
-
-
-% Get the revision limit. This should just return the last
-% value that was passed to set_revs_limit/2.
--callback get_revs_limit(DbHandle::db_handle()) -> RevsLimit::pos_integer().
-
-
-% Get the current security properties. This should just return
-% the last value that was passed to set_security/2.
--callback get_security(DbHandle::db_handle()) -> SecProps::any().
-
-
-% Get the current properties.
--callback get_props(DbHandle::db_handle()) -> Props::[any()].
-
-
-% This information is displayed in the database info properties. It
-% should just be a list of {Name::atom(), Size::non_neg_integer()}
-% tuples that will then be combined across shards. Currently,
-% various modules expect there to at least be values for:
-%
-%   file     - Number of bytes on disk
-%
-%   active   - Theoretical minimum number of bytes to store this db on disk
-%              which is used to guide decisions on compaction
-%
-%   external - Number of bytes that would be required to represent the
-%              contents outside of the database (for capacity and backup
-%              planning)
--callback get_size_info(DbHandle::db_handle()) -> SizeInfo::size_info().
-
-
-% This returns the information for the given partition. It should just be
-% a list of {Name::atom(), Value} tuples (see partition_info()) giving the
-% partition name, doc count, deleted doc count and two sizes:
-%
-%   active   - Theoretical minimum number of bytes to store this partition on disk
-%
-%   external - Number of bytes that would be required to represent the
-%              contents of this partition outside of the database
--callback get_partition_info(DbHandle::db_handle(), Partition::binary()) ->
-    partition_info().
-
-
-% The current update sequence of the database. The update
-% sequence should be incremented for every revision added to
-% the database.
--callback get_update_seq(DbHandle::db_handle()) -> UpdateSeq::non_neg_integer().
-
-
-% Whenever a database is created it should generate a
-% persistent UUID for identification in case the shard should
-% ever need to be moved between nodes in a cluster.
--callback get_uuid(DbHandle::db_handle()) -> UUID::binary().
-
-
-% These functions are only called by couch_db_updater and
-% as such are guaranteed to be single threaded calls. The
-% database should simply store these values somewhere so
-% they can be returned by the corresponding get_* calls.
-
--callback set_revs_limit(DbHandle::db_handle(), RevsLimit::pos_integer()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
--callback set_purge_infos_limit(DbHandle::db_handle(), Limit::pos_integer()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
--callback set_security(DbHandle::db_handle(), SecProps::any()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is only called by couch_db_updater and
-% as such is guaranteed to be a single threaded call. The
-% database should simply store the provided property list
-% unaltered.
-
--callback set_props(DbHandle::db_handle(), Props::any()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% Set the current update sequence of the database. The intention is to use this
-% when copying a database such that the destination update sequence should
-% match exactly the source update sequence.
--callback set_update_seq(
-    DbHandle::db_handle(),
-    UpdateSeq::non_neg_integer()) ->
-    {ok, NewDbHandle::db_handle()}.
-
-
-% This function will be called by many processes concurrently.
-% It should return a #full_doc_info{} record or not_found for
-% every provided DocId in the order those DocId's appear in
-% the input.
-%
-% Traditionally this function will only return documents that
-% were present in the database when the DbHandle was retrieved
-% from couch_server. It is currently unknown what would break
-% if a storage engine deviated from that property.
--callback open_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
-        [#full_doc_info{} | not_found].
-
-
-% This function will be called by many processes concurrently.
-% It should return a #doc{} record or not_found for every
-% provided DocId in the order they appear in the input.
-%
-% The same caveats around database snapshots from open_docs
-% apply to this function (although this function is called
-% rather less frequently so it may not be as big of an
-% issue).
--callback open_local_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
-        [#doc{} | not_found].
-
-
-% This function will be called from many contexts concurrently.
-% The provided RawDoc is a #doc{} record that has its body
-% value set to the body value returned from write_doc_body/2.
-%
-% This API exists so that storage engines can store document
-% bodies externally from the #full_doc_info{} record (which
-% is the traditional approach and is recommended).
--callback read_doc_body(DbHandle::db_handle(), RawDoc::doc()) ->
-        doc().
-
-
-% This function will be called from many contexts concurrently.
-% If the storage engine has a purge_info() record for any of the
-% provided UUIDs, those purge_info() records should be returned. The
-% resulting list should have the same length as the input list of
-% UUIDs.
--callback load_purge_infos(DbHandle::db_handle(), [uuid()]) ->
-        [purge_info() | not_found].
-
-
-% This function is called concurrently by any client process
-% that is writing a document. It should accept a #doc{}
-% record and return a #doc{} record with a mutated body it
-% wishes to have written to disk by write_doc_body/2.
-%
-% This API exists so that storage engines can compress
-% document bodies in parallel by client processes rather
-% than forcing all compression to occur single threaded
-% in the context of the couch_db_updater process.
--callback serialize_doc(DbHandle::db_handle(), Doc::doc()) ->
-        doc().
-
-
-% This function is called in the context of a couch_db_updater
-% which means its single threaded for the given DbHandle.
-%
-% The returned #doc{} record should have its Body set to a value
-% that will be stored in the #full_doc_info{} record's revision
-% tree leaves which is passed to read_doc_body/2 above when
-% a client wishes to read a document.
-%
-% The BytesWritten return value is used to determine the number
-% of active bytes in the database, which is used to decide
-% when to compact this database.
--callback write_doc_body(DbHandle::db_handle(), Doc::doc()) ->
-        {ok, FlushedDoc::doc(), BytesWritten::non_neg_integer()}.
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% This is probably the most complicated function in the entire
-% API due to a few subtle behavior requirements required by
-% CouchDB's storage model.
-%
-% The Pairs argument is a list of pairs (2-tuples) of
-% #full_doc_info{} records. The first element of the pair is
-% the #full_doc_info{} that exists on disk. The second element
-% is the new version that should be written to disk. There are
-% two basic cases that should be followed:
-%
-%     1. {not_found, #full_doc_info{}} - A new document was created
-%     2. {#full_doc_info{}, #full_doc_info{}} - A document was updated
-%
-% The cases are fairly straightforward as long as moving entries
-% in the update sequence is properly accounted for.
-%
-% The LocalDocs variable is applied separately. It's important to
-% note for new storage engine authors that these documents are
-% separate because they should *not* be included as part of the
-% changes index for the database.
-%
-% Traditionally an invocation of write_doc_infos should be all
-% or nothing, in the sense that if an error occurs (or the VM dies)
-% then the database doesn't retain any of the changes. However
-% as long as a storage engine maintains consistency this should
-% not be an issue as it has never been a guarantee and the
-% batches are non-deterministic (from the point of view of the
-% client).
--callback write_doc_infos(
-    DbHandle::db_handle(),
-    Pairs::doc_pairs(),
-    LocalDocs::[#doc{}]) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% Each doc_pair() is a 2-tuple of #full_doc_info{} records. The
-% first element of the pair is the #full_doc_info{} that exists
-% on disk. The second element is the new version that should be
-% written to disk. There are three basic cases that should be considered:
-%
-%     1. {#full_doc_info{}, #full_doc_info{}} - A document was partially purged
-%     2. {#full_doc_info{}, not_found} - A document was completely purged
-%     3. {not_found, not_found} - A no-op purge
-%
-% In case 1, non-tail-append engines may have to remove revisions
-% specifically rather than rely on compaction to remove them. Also
-% note that the new #full_doc_info{} will have a different update_seq
-% that will need to be reflected in the changes feed.
-%
-% In case 2 the document was purged completely, which
-% means it needs to be removed from the database including its
-% entry in the update sequence.
-%
-% In case 3 we just need to store the purge_info() to know that it
-% was processed even though it produced no changes to the database.
-%
-% The purge_info() tuples contain the purge_seq, uuid, docid and
-% revisions that were requested to be purged. This should be persisted
-% in such a way that we can efficiently load purge_info() by its UUID
-% as well as iterate over purge_info() entries in order of their PurgeSeq.
--callback purge_docs(DbHandle::db_handle(), [doc_pair()], [purge_info()]) ->
-        {ok, NewDbHandle::db_handle()}.
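
For reference, purge_info() is the 4-tuple defined near the top of this
module: {purge_seq(), uuid(), docid(), revs()}. A hypothetical value, with
all concrete data made up, might look like:

    {12, <<"7a1c2f3e4d5b6a7980c1d2e3f4a5b6c7">>, <<"mydoc">>, [{2, <<"rev-hash">>}]}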
-
-
-% This function should be called from a single threaded context and
-% should be used to copy purge infos from one database to another
-% when copying a database.
--callback copy_purge_infos(DbHandle::db_handle(), [purge_info()]) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called in the context of couch_db_updater and
-% as such is single threaded for any given DbHandle.
-%
-% This call is made periodically to ensure that the database has
-% stored all updates on stable storage. (ie, here is where you fsync).
--callback commit_data(DbHandle::db_handle()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called by multiple processes concurrently.
-%
-% This function, along with open_read_stream, is part of the
-% attachments API. For the time being I'm leaving these mostly
-% undocumented. There are implementations of this in both the
-% legacy btree engine as well as the alternative engine
-% implementations for the curious, however this is a part of the
-% API for which I'd like feedback.
-%
-% Currently an engine can elect to not implement these APIs
-% by throwing the atom not_supported.
--callback open_write_stream(
-    DbHandle::db_handle(),
-    Options::write_stream_options()) ->
-        {ok, pid()}.
-
-
-% See the documentation for open_write_stream
--callback open_read_stream(DbHandle::db_handle(), StreamDiskInfo::any()) ->
-        {ok, {Module::atom(), ReadStreamState::any()}}.
-
-
-% See the documentation for open_write_stream
--callback is_active_stream(DbHandle::db_handle(), ReadStreamState::any()) ->
-        boolean().
-
-
-% This function is called by many processes concurrently.
-%
-% This function is called to fold over the documents in
-% the database sorted by the raw byte collation order of
-% the document id. For each document id, the supplied user
-% function should be invoked with the first argument set
-% to the #full_doc_info{} record and the second argument
-% set to the current user supplied accumulator. The return
-% value of the user function is a 2-tuple of {Go, NewUserAcc}.
-% The NewUserAcc value should then replace the current
-% user accumulator. If Go is the atom ok, iteration over
-% documents should continue. If Go is the atom stop, then
-% iteration should halt and the return value should be
-% {ok, NewUserAcc}.
-%
-% Possible options to this function include:
-%
-%     1. start_key - Start iteration at the provided key,
-%        or just after it if the key doesn't exist
-%     2. end_key - Stop iteration just after the provided key
-%     3. end_key_gt - Stop iteration prior to visiting the provided
-%        key
-%     4. dir - The atom fwd or rev. This is to be able to iterate
-%        over documents in reverse order. The logic for comparing
-%        start_key, end_key, and end_key_gt is then reversed (ie,
-%        when rev, start_key should be greater than end_key if the
-%        user wishes to see results)
-%     5. include_reductions - This is a hack for _all_docs since
-%        it currently relies on reductions to count an offset. This
-%        is a terrible hack that will need to be addressed by the
-%        API in the future. If this option is present the supplied
-%        user function expects three arguments, where the first
-%        argument is a #full_doc_info{} record, the second argument
-%        is the current list of reductions to the left of the current
-%        document, and the third argument is the current user
-%        accumulator. The return value from the user function is
-%        unaffected. However the final return value of the function
-%        should include the final total reductions as the second
-%        element of a 3-tuple. Like I said, this is a hack.
-%     6. include_deleted - By default deleted documents are not
-%        included in fold_docs calls. However in some special
-%        cases we do want to see them (as of now, just in couch_changes
-%        during the design document changes optimization)
-%
-% Historically, if a process calls this function repeatedly it
-% would see the same results returned even if there were concurrent
-% updates happening. However there doesn't seem to be any instance of
-% that actually happening so a storage engine that includes new results
-% between invocations shouldn't have any issues.
--callback fold_docs(
-    DbHandle::db_handle(),
-    UserFold::doc_fold_fun(),
-    UserAcc::any(),
-    doc_fold_options()) ->
-        {ok, LastUserAcc::any()}.
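
As an illustration of the fold contract above, a user function that collects
document ids might look like the following sketch; it assumes the
#full_doc_info{} record from couch_db.hrl and the fold_docs/4 wrapper in this
module:

    collect_ids(#full_doc_info{id = Id}, Acc) ->
        % Returning ok continues the fold; stop would halt it early.
        {ok, [Id | Acc]}.

    %% Hypothetical usage:
    %% {ok, Ids} = couch_db_engine:fold_docs(Db, fun collect_ids/2, [], []).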
-
-
-% This function may be called by many processes concurrently.
-%
-% This should behave exactly the same as fold_docs/4 except that it
-% should only return local documents and the first argument to the
-% user function is a #doc{} record, not a #full_doc_info{}.
--callback fold_local_docs(
-    DbHandle::db_handle(),
-    UserFold::local_doc_fold_fun(),
-    UserAcc::any(),
-    doc_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over the documents (not local
-% documents) in order of their most recent update. Each document
-% in the database should have exactly one entry in this sequence.
-% If a document is updated during a call to this function it should
-% not be included twice as that will probably lead to Very Bad Things.
-%
-% This should behave similarly to fold_docs/4 in that the supplied
-% user function should be invoked with a #full_doc_info{} record
-% as the first argument and the current user accumulator as the
-% second argument. The same semantics for the return value from the
-% user function should be handled as in fold_docs/4.
-%
-% The StartSeq parameter indicates where the fold should start
-% *after*. As in, if a change with a value of StartSeq exists in the
-% database it should not be included in the fold.
-%
-% The only option currently supported by the API is the `dir`
-% option that should behave the same as for fold_docs.
--callback fold_changes(
-    DbHandle::db_handle(),
-    StartSeq::non_neg_integer(),
-    UserFold::changes_fold_fun(),
-    UserAcc::any(),
-    changes_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
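A similarly hedged sketch of folding the changes feed from the beginning (StartSeq = 0), assuming an open handle Db:

    %% each change arrives as a #full_doc_info{} in update order
    ChangesFun = fun(#full_doc_info{id = Id, update_seq = Seq}, Acc) ->
        {ok, [{Id, Seq} | Acc]}
    end,
    {ok, Changes} = couch_db_engine:fold_changes(Db, 0, ChangesFun, [], []),
    lists:reverse(Changes).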
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over purge requests in order of
-% oldest purge first (increasing purge_seq order)
-%
-% The StartPurgeSeq parameter indicates where the fold should start *after*.
--callback fold_purge_infos(
-    DbHandle::db_handle(),
-    StartPurgeSeq::purge_seq(),
-    UserFold::purge_fold_fun(),
-    UserAcc::any(),
-    purge_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
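A sketch of the purge fold, where each entry arrives as a {PurgeSeq, UUID, DocId, Revs} tuple (the same shape consumed by purge_cb/2 in couch_db_split below), again assuming an open handle Db:

    %% collect purge infos starting after purge sequence 0
    PurgeFun = fun({PSeq, _UUID, Id, Revs}, Acc) ->
        {ok, [{PSeq, Id, Revs} | Acc]}
    end,
    {ok, Purges} = couch_db_engine:fold_purge_infos(Db, 0, PurgeFun, [], []),
    lists:reverse(Purges).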
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to count the number of documents changed
-% since the given UpdateSeq (ie, not including the possible change
-% at exactly UpdateSeq). It is currently only used internally to
-% provide a status update in a replication's _active_tasks entry
-% to indicate how many documents are left to be processed.
-%
-% This is a fairly difficult thing to support in engines that don't
-% behave exactly like a tree with efficient support for counting rows
-% between keys. As such, returning 0, or even just the difference between
-% the current update sequence and the given UpdateSeq, is possibly the
-% best some storage engines can provide. This may lead to some confusion
-% when interpreting the _active_tasks entry if the storage engine isn't
-% accounted for by the client.
--callback count_changes_since(
-    DbHandle::db_handle(),
-    UpdateSeq::non_neg_integer()) ->
-        TotalChanges::non_neg_integer().
-
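A rough illustration of the only use the comment above describes: turning the count into a progress hint for _active_tasks-style reporting, with CurrentSeq assumed to be the sequence a job has processed so far:

    %% report how much work is left; 0 is a valid (if unhelpful) answer
    %% for engines that cannot count rows between sequences efficiently
    ChangesLeft = couch_db_engine:count_changes_since(Db, CurrentSeq),
    couch_log:info("~p changes left to process", [ChangesLeft]).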
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% If a storage engine requires compaction this is a trigger to start
-% it off. However, a storage engine can do whatever it wants here. As
-% this is fairly engine-specific there's not a lot of guidance that is
-% generally applicable.
-%
-% When compaction is finished the compactor should use
-% gen_server:cast/2 to send a {compact_done, CompactEngine, CompactInfo}
-% message to the Parent pid provided. Currently CompactEngine
-% must be the same engine that started the compaction and CompactInfo
-% is an arbitrary term that's passed to finish_compaction/4.
--callback start_compaction(
-    DbHandle::db_handle(),
-    DbName::binary(),
-    Options::db_open_options(),
-    Parent::pid()) ->
-        {ok, NewDbHandle::db_handle(), CompactorPid::pid()}.
-
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% Same as for start_compaction, this will be extremely specific to
-% any given storage engine.
-%
-% The split in the API here is so that if the storage engine needs
-% to update the DbHandle state of the couch_db_updater it can, since
-% finish_compaction/4 is called in the context of the couch_db_updater.
--callback finish_compaction(
-    OldDbHandle::db_handle(),
-    DbName::binary(),
-    Options::db_open_options(),
-    CompactInfo::any()) ->
-        {ok, CompactedDbHandle::db_handle(), CompactorPid::pid() | undefined}.
-
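A minimal sketch of the hand-off described above, as a hypothetical compactor process might perform it once its work is done; Parent is the pid given to start_compaction/4 and CompactInfo is whatever opaque term the engine wants echoed back into finish_compaction/4:

    %% hypothetical helper a compactor process could call when finished
    notify_compaction_done(Parent, CompactEngine, CompactInfo) ->
        gen_server:cast(Parent, {compact_done, CompactEngine, CompactInfo}).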
-
--export([
-    exists/2,
-    delete/4,
-    delete_compaction_files/4,
-
-    init/3,
-    terminate/2,
-    handle_db_updater_call/3,
-    handle_db_updater_info/2,
-
-    incref/1,
-    decref/1,
-    monitored_by/1,
-
-    last_activity/1,
-
-    get_engine/1,
-    get_compacted_seq/1,
-    get_del_doc_count/1,
-    get_disk_version/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_props/1,
-    get_size_info/1,
-    get_partition_info/2,
-    get_update_seq/1,
-    get_uuid/1,
-
-    set_revs_limit/2,
-    set_security/2,
-    set_purge_infos_limit/2,
-    set_props/2,
-
-    set_update_seq/2,
-
-    open_docs/2,
-    open_local_docs/2,
-    read_doc_body/2,
-    load_purge_infos/2,
-
-    serialize_doc/2,
-    write_doc_body/2,
-    write_doc_infos/3,
-    purge_docs/3,
-    copy_purge_infos/2,
-    commit_data/1,
-
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_changes/5,
-    fold_purge_infos/5,
-    count_changes_since/2,
-
-    start_compaction/1,
-    finish_compaction/2,
-    trigger_on_compact/1
-]).
-
-
-exists(Engine, DbPath) ->
-    Engine:exists(DbPath).
-
-
-delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) ->
-    Engine:delete(RootDir, DbPath, DelOpts).
-
-
-delete_compaction_files(Engine, RootDir, DbPath, DelOpts)
-        when is_list(DelOpts) ->
-    Engine:delete_compaction_files(RootDir, DbPath, DelOpts).
-
-
-init(Engine, DbPath, Options) ->
-    case Engine:init(DbPath, Options) of
-         {ok, EngineState} ->
-             {ok, {Engine, EngineState}};
-         Error ->
-             throw(Error)
-    end.
-
-
-terminate(Reason, #db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:terminate(Reason, EngineState).
-
-
-handle_db_updater_call(Msg, _From, #db{} = Db) ->
-    #db{
-        engine = {Engine, EngineState}
-    } = Db,
-    case Engine:handle_db_updater_call(Msg, EngineState) of
-        {reply, Resp, NewState} ->
-            {reply, Resp, Db#db{engine = {Engine, NewState}}};
-        {stop, Reason, Resp, NewState} ->
-            {stop, Reason, Resp, Db#db{engine = {Engine, NewState}}}
-    end.
-
-
-handle_db_updater_info(Msg, #db{} = Db) ->
-    #db{
-        name = Name,
-        engine = {Engine, EngineState}
-    } = Db,
-    case Engine:handle_db_updater_info(Msg, EngineState) of
-        {noreply, NewState} ->
-            {noreply, Db#db{engine = {Engine, NewState}}};
-        {noreply, NewState, Timeout} ->
-            {noreply, Db#db{engine = {Engine, NewState}}, Timeout};
-        {stop, Reason, NewState} ->
-            couch_log:error("DB ~s shutting down: ~p", [Name, Msg]),
-            {stop, Reason, Db#db{engine = {Engine, NewState}}}
-    end.
-
-
-incref(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewState} = Engine:incref(EngineState),
-    {ok, Db#db{engine = {Engine, NewState}}}.
-
-
-decref(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:decref(EngineState).
-
-
-monitored_by(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:monitored_by(EngineState).
-
-
-last_activity(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:last_activity(EngineState).
-
-
-get_engine(#db{} = Db) ->
-    #db{engine = {Engine, _}} = Db,
-    Engine.
-
-
-get_compacted_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_compacted_seq(EngineState).
-
-
-get_del_doc_count(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_del_doc_count(EngineState).
-
-
-get_disk_version(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_disk_version(EngineState).
-
-
-get_doc_count(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_doc_count(EngineState).
-
-
-get_epochs(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_epochs(EngineState).
-
-
-get_purge_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_purge_seq(EngineState).
-
-
-get_oldest_purge_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_oldest_purge_seq(EngineState).
-
-
-get_purge_infos_limit(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_purge_infos_limit(EngineState).
-
-
-get_revs_limit(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_revs_limit(EngineState).
-
-
-get_security(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_security(EngineState).
-
-
-get_props(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_props(EngineState).
-
-
-get_size_info(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_size_info(EngineState).
-
-
-get_partition_info(#db{} = Db, Partition) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_partition_info(EngineState, Partition).
-
-
-get_update_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_update_seq(EngineState).
-
-get_uuid(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_uuid(EngineState).
-
-
-set_revs_limit(#db{} = Db, RevsLimit) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_purge_infos_limit(#db{} = Db, PurgedDocsLimit) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_purge_infos_limit(EngineState, PurgedDocsLimit),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_security(#db{} = Db, SecProps) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_security(EngineState, SecProps),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_props(#db{} = Db, Props) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_props(EngineState, Props),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_update_seq(#db{} = Db, UpdateSeq) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_update_seq(EngineState, UpdateSeq),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_docs(#db{} = Db, DocIds) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_docs(EngineState, DocIds).
-
-
-open_local_docs(#db{} = Db, DocIds) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_local_docs(EngineState, DocIds).
-
-
-read_doc_body(#db{} = Db, RawDoc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:read_doc_body(EngineState, RawDoc).
-
-
-load_purge_infos(#db{} = Db, UUIDs) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:load_purge_infos(EngineState, UUIDs).
-
-
-serialize_doc(#db{} = Db, #doc{} = Doc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:serialize_doc(EngineState, Doc).
-
-
-write_doc_body(#db{} = Db, #doc{} = Doc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:write_doc_body(EngineState, Doc).
-
-
-write_doc_infos(#db{} = Db, DocUpdates, LocalDocs) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:write_doc_infos(EngineState, DocUpdates, LocalDocs),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-purge_docs(#db{} = Db, DocUpdates, Purges) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:purge_docs(
-        EngineState, DocUpdates, Purges),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-copy_purge_infos(#db{} = Db, Purges) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:copy_purge_infos(
-        EngineState, Purges),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-commit_data(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:commit_data(EngineState),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_write_stream(#db{} = Db, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_write_stream(EngineState, Options).
-
-
-open_read_stream(#db{} = Db, StreamDiskInfo) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_read_stream(EngineState, StreamDiskInfo).
-
-
-is_active_stream(#db{} = Db, ReadStreamState) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:is_active_stream(EngineState, ReadStreamState).
-
-
-fold_docs(#db{} = Db, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options).
-
-
-fold_purge_infos(#db{} = Db, StartPurgeSeq, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_purge_infos(
-            EngineState, StartPurgeSeq, UserFun, UserAcc, Options).
-
-
-count_changes_since(#db{} = Db, StartSeq) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:count_changes_since(EngineState, StartSeq).
-
-
-start_compaction(#db{} = Db) ->
-    #db{
-        engine = {Engine, EngineState},
-        name = DbName,
-        options = Options
-    } = Db,
-    {ok, NewEngineState, Pid} = Engine:start_compaction(
-            EngineState, DbName, Options, self()),
-    {ok, Db#db{
-        engine = {Engine, NewEngineState},
-        compactor_pid = Pid
-    }}.
-
-
-finish_compaction(Db, CompactInfo) ->
-    #db{
-        engine = {Engine, St},
-        name = DbName,
-        options = Options
-    } = Db,
-    NewDb = case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
-        {ok, NewState, undefined} ->
-            couch_event:notify(DbName, compacted),
-            Db#db{
-                engine = {Engine, NewState},
-                compactor_pid = nil
-            };
-        {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
-            Db#db{
-                engine = {Engine, NewState},
-                compactor_pid = CompactorPid
-            }
-    end,
-    ok = gen_server:call(couch_server, {db_updated, NewDb}, infinity),
-    {ok, NewDb}.
-
-
-trigger_on_compact(DbName) ->
-    {ok, DDocs} = get_ddocs(DbName),
-    couch_db_plugin:on_compact(DbName, DDocs).
-
-
-get_ddocs(<<"shards/", _/binary>> = DbName) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(fabric:design_docs(mem3:dbname(DbName)))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, JsonDDocs}} ->
-            {ok, lists:map(fun(JsonDDoc) ->
-                couch_doc:from_json_obj(JsonDDoc)
-            end, JsonDDocs)};
-        {'DOWN', Ref, _, _, Else} ->
-            Else
-    end;
-get_ddocs(DbName) ->
-    couch_util:with_db(DbName, fun(Db) ->
-        FoldFun = fun(FDI, Acc) ->
-            {ok, Doc} = couch_db:open_doc_int(Db, FDI, []),
-            {ok, [Doc | Acc]}
-        end,
-        {ok, Docs} = couch_db:fold_design_docs(Db, FoldFun, [], []),
-        {ok, lists:reverse(Docs)}
-    end).
diff --git a/src/couch/src/couch_db_header.erl b/src/couch/src/couch_db_header.erl
deleted file mode 100644
index 355364f..0000000
--- a/src/couch/src/couch_db_header.erl
+++ /dev/null
@@ -1,405 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_header).
-
-
--export([
-    new/0,
-    from/1,
-    is_header/1,
-    upgrade/1,
-    set/2
-]).
-
--export([
-    disk_version/1,
-    update_seq/1,
-    id_tree_state/1,
-    seq_tree_state/1,
-    latest/1,
-    local_tree_state/1,
-    purge_seq/1,
-    purged_docs/1,
-    security_ptr/1,
-    revs_limit/1,
-    uuid/1,
-    epochs/1,
-    compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 6).
-
--record(db_header, {
-    disk_version = ?LATEST_DISK_VERSION,
-    update_seq = 0,
-    unused = 0,
-    id_tree_state = nil,
-    seq_tree_state = nil,
-    local_tree_state = nil,
-    purge_seq = 0,
-    purged_docs = nil,
-    security_ptr = nil,
-    revs_limit = 1000,
-    uuid,
-    epochs,
-    compacted_seq
-}).
-
-
-new() ->
-    #db_header{
-        uuid = couch_uuids:random(),
-        epochs = [{node(), 0}]
-    }.
-
-
-from(Header0) ->
-    Header = upgrade(Header0),
-    #db_header{
-        uuid = Header#db_header.uuid,
-        epochs = Header#db_header.epochs,
-        compacted_seq = Header#db_header.compacted_seq
-    }.
-
-
-is_header(Header) ->
-    try
-        upgrade(Header),
-        true
-    catch _:_ ->
-        false
-    end.
-
-
-upgrade(Header) ->
-    Funs = [
-        fun upgrade_tuple/1,
-        fun upgrade_disk_version/1,
-        fun upgrade_uuid/1,
-        fun upgrade_epochs/1,
-        fun upgrade_compacted_seq/1
-    ],
-    lists:foldl(fun(F, HdrAcc) ->
-        F(HdrAcc)
-    end, Header, Funs).
-
-
-set(Header0, Fields) ->
-    % A subtlety here is that if a database was open during
-    % the release upgrade that introduced uuids and epochs, then
-    % this dynamic upgrade also assigns a uuid and epoch.
-    Header = upgrade(Header0),
-    lists:foldl(fun({Field, Value}, HdrAcc) ->
-        set_field(HdrAcc, Field, Value)
-    end, Header, Fields).
-
-
-disk_version(Header) ->
-    get_field(Header, disk_version).
-
-
-update_seq(Header) ->
-    get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
-    get_field(Header, id_tree_state).
-
-
-seq_tree_state(Header) ->
-    get_field(Header, seq_tree_state).
-
-
-local_tree_state(Header) ->
-    get_field(Header, local_tree_state).
-
-
-purge_seq(Header) ->
-    get_field(Header, purge_seq).
-
-
-purged_docs(Header) ->
-    get_field(Header, purged_docs).
-
-
-security_ptr(Header) ->
-    get_field(Header, security_ptr).
-
-
-revs_limit(Header) ->
-    get_field(Header, revs_limit).
-
-
-uuid(Header) ->
-    get_field(Header, uuid).
-
-
-epochs(Header) ->
-    get_field(Header, epochs).
-
-
-compacted_seq(Header) ->
-    get_field(Header, compacted_seq).
-
-
-get_field(Header, Field) ->
-    Idx = index(Field),
-    case Idx > tuple_size(Header) of
-        true -> undefined;
-        false -> element(index(Field), Header)
-    end.
-
-
-set_field(Header, Field, Value) ->
-    setelement(index(Field), Header, Value).
-
-
-index(Field) ->
-    couch_util:get_value(Field, indexes()).
-
-
-indexes() ->
-    Fields = record_info(fields, db_header),
-    Indexes = lists:seq(2, record_info(size, db_header)),
-    lists:zip(Fields, Indexes).
-
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
-    Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
-    NewSize = record_info(size, db_header),
-    if tuple_size(Old) < NewSize -> ok; true ->
-        erlang:error({invalid_header_size, Old})
-    end,
-    {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
-        {Idx+1, setelement(Idx, Hdr, Val)}
-    end, {1, #db_header{}}, tuple_to_list(Old)),
-    if is_record(New, db_header) -> ok; true ->
-        erlang:error({invalid_header_extension, {Old, New}})
-    end,
-    New.
-
--define(OLD_DISK_VERSION_ERROR,
-    "Database files from versions smaller than 0.10.0 are no longer supported").
-
-upgrade_disk_version(#db_header{}=Header) ->
-    case element(2, Header) of
-        1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
-        5 -> Header; % pre 1.2
-        ?LATEST_DISK_VERSION -> Header;
-        _ ->
-            Reason = "Incorrect disk header version",
-            throw({database_disk_version_error, Reason})
-    end.
-
-
-upgrade_uuid(#db_header{}=Header) ->
-    case Header#db_header.uuid of
-        undefined ->
-            % Upgrading this old db file to a newer
-            % on disk format that includes a UUID.
-            Header#db_header{uuid=couch_uuids:random()};
-        _ ->
-            Header
-    end.
-
-
-upgrade_epochs(#db_header{}=Header) ->
-    NewEpochs = case Header#db_header.epochs of
-        undefined ->
-            % This node is taking over ownership of a shard with
-            % an old version of couch file. Before epochs there
-            % was always an implicit assumption that a file was
-            % owned since eternity by the node it was on. This
-            % just codifies that assumption.
-            [{node(), 0}];
-        [{Node, _} | _] = Epochs0 when Node == node() ->
-            % Current node is the current owner of this db
-            Epochs0;
-        Epochs1 ->
-            % This node is taking over ownership of this db
-            % and marking the update sequence where it happened.
-            [{node(), Header#db_header.update_seq} | Epochs1]
-    end,
-    % It's possible for a node to open a db and claim
-    % ownership but never make a write to the db. This
-    % removes nodes that claimed ownership but never
-    % changed the database.
-    DedupedEpochs = remove_dup_epochs(NewEpochs),
-    Header#db_header{epochs=DedupedEpochs}.
-
-
-% This is slightly relying on the update_seqs being sorted
-% in epochs due to how we only ever push things onto the
-% front. Although if we ever had a case where the update_seq
-% is not monotonically increasing I don't know that we'd
-% want to remove dupes (by calling a sort on the input to this
-% function). So for now we don't sort but are relying on the
-% idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
-    % Seqs match, keep the most recent owner
-    [{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
-    % Seqs don't match.
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
-    % Seqs match, keep the most recent owner
-    remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
-    % Seqs don't match, recurse to check others
-    [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
-
-upgrade_compacted_seq(#db_header{}=Header) ->
-    case Header#db_header.compacted_seq of
-        undefined ->
-            Header#db_header{compacted_seq=0};
-        _ ->
-            Header
-    end.
-
-latest(?LATEST_DISK_VERSION) ->
-    true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
-    false;
-latest(_Else) ->
-    undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
-    {
-        db_header, % record name
-        Vsn, % disk version
-        100, % update_seq
-        0, % unused
-        foo, % id_tree_state
-        bar, % seq_tree_state
-        bam, % local_tree_state
-        1, % purge_seq
-        baz, % purged_docs
-        bang, % security_ptr
-        999 % revs_limit
-    }.
-
-
-upgrade_v3_test() ->
-    Vsn3Header = mk_header(3),
-    NewHeader = upgrade_tuple(Vsn3Header),
-
-    % Tuple upgrades don't change any of the existing field values
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(3, disk_version(NewHeader)),
-    ?assertEqual(100, update_seq(NewHeader)),
-    ?assertEqual(foo, id_tree_state(NewHeader)),
-    ?assertEqual(bar, seq_tree_state(NewHeader)),
-    ?assertEqual(bam, local_tree_state(NewHeader)),
-    ?assertEqual(1, purge_seq(NewHeader)),
-    ?assertEqual(baz, purged_docs(NewHeader)),
-    ?assertEqual(bang, security_ptr(NewHeader)),
-    ?assertEqual(999, revs_limit(NewHeader)),
-    ?assertEqual(undefined, uuid(NewHeader)),
-    ?assertEqual(undefined, epochs(NewHeader)),
-
-    ?assertThrow({database_disk_version_error, _},
-                 upgrade_disk_version(NewHeader)).
-
-
-upgrade_v5_test() ->
-    Vsn5Header = mk_header(5),
-    NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(5, disk_version(NewHeader)),
-
-    % Security ptr isn't changed for v5 headers
-    ?assertEqual(bang, security_ptr(NewHeader)).
-
-
-upgrade_uuid_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a new UUID
-    NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
-    ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
-    % Headers with a UUID don't have their UUID changed
-    NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
-    ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
-    % Derived empty headers maintain the same UUID
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-
-upgrade_epochs_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a default epochs set
-    NewHeader = upgrade(Vsn5Header),
-    ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
-    % Fake an old entry in epochs
-    FakeFields = [
-        {update_seq, 20},
-        {epochs, [{'someothernode@someotherhost', 0}]}
-    ],
-    NotOwnedHeader = set(NewHeader, FakeFields),
-
-    OwnedEpochs = [
-        {node(), 20},
-        {'someothernode@someotherhost', 0}
-    ],
-
-    % Upgrading a header not owned by the local node updates
-    % the epochs appropriately.
-    NowOwnedHeader = upgrade(NotOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
-    % Headers with epochs stay the same after upgrades
-    NewNewHeader = upgrade(NowOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
-    % Getting a reset header maintains the epoch data
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-
-get_uuid_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, uuid(Vsn5Header)).
-
-
-get_epochs_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, epochs(Vsn5Header)).
-
-
--endif.
diff --git a/src/couch/src/couch_db_int.hrl b/src/couch/src/couch_db_int.hrl
deleted file mode 100644
index 7da0ce5..0000000
--- a/src/couch/src/couch_db_int.hrl
+++ /dev/null
@@ -1,76 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--record(db, {
-    vsn = 1,
-    name,
-    filepath,
-
-    engine = {couch_bt_engine, undefined},
-
-    main_pid = nil,
-    compactor_pid = nil,
-
-    committed_update_seq,
-
-    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
-
-    user_ctx = #user_ctx{},
-    security = [],
-    validate_doc_funs = undefined,
-
-    before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
-    after_doc_read = nil,    % nil | fun(Doc, Db) -> NewDoc
-
-    % feature removed in 3.x, but field kept to avoid changing db record size
-    % and breaking rolling cluster upgrade
-    waiting_delayed_commit_deprecated,
-
-    options = [],
-    compression
-}).
-
-
--define(OLD_DB_REC, {
-    db,
-    _, % MainPid
-    _, % CompactorPid
-    _, % InstanceStartTime
-    _, % Fd
-    _, % FdMonitor
-    _, % Header
-    _, % CommittedUpdateSeq
-    _, % IdTree
-    _, % SeqTree
-    _, % LocalTree
-    _, % UpdateSeq
-    _, % Name
-    _, % FilePath
-    _, % ValidateDocFuns
-    _, % Security
-    _, % SecurityPtr
-    _, % UserCtx
-    _, % WaitingDelayedCommit
-    _, % RevsLimit
-    _, % FsyncOptions
-    _, % Options
-    _, % Compression
-    _, % BeforeDocUpdate
-    _  % AfterDocRead
-}).
-
-
--define(OLD_DB_NAME(Db), element(2, Db)).
--define(OLD_DB_MAIN_PID(Db), element(13, Db)).
--define(OLD_DB_USER_CTX(Db), element(18, Db)).
--define(OLD_DB_SECURITY(Db), element(16, Db)).
diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl
deleted file mode 100644
index c3684c6..0000000
--- a/src/couch/src/couch_db_plugin.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_plugin).
-
--export([
-    validate_dbname/3,
-    before_doc_update/3,
-    after_doc_read/2,
-    validate_docid/1,
-    check_is_admin/1,
-    is_valid_purge_client/2,
-    on_compact/2,
-    on_delete/2
-]).
-
--define(SERVICE_ID, couch_db).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-validate_dbname(DbName, Normalized, Default) ->
-    maybe_handle(validate_dbname, [DbName, Normalized], Default).
-
-before_doc_update(Db, Doc0, UpdateType) ->
-    Fun = couch_db:get_before_doc_update_fun(Db),
-    case with_pipe(before_doc_update, [Doc0, Db, UpdateType]) of
-        [Doc1, _Db, UpdateType1] when is_function(Fun) ->
-            Fun(Doc1, Db, UpdateType1);
-        [Doc1, _Db, _UpdateType] ->
-            Doc1
-    end.
-
-after_doc_read(Db, Doc0) ->
-    Fun = couch_db:get_after_doc_read_fun(Db),
-    case with_pipe(after_doc_read, [Doc0, Db]) of
-        [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
-        [Doc1, _Db] -> Doc1
-    end.
-
-validate_docid(Id) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if they specifically allow the given Id
-    couch_epi:any(Handle, ?SERVICE_ID, validate_docid, [Id], []).
-
-check_is_admin(Db) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if they specifically allow the given Db
-    couch_epi:any(Handle, ?SERVICE_ID, check_is_admin, [Db], []).
-
-is_valid_purge_client(DbName, Props) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if they specifically allow the given purge client
-    couch_epi:any(Handle, ?SERVICE_ID, is_valid_purge_client, [DbName, Props], []).
-
-on_compact(DbName, DDocs) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    couch_epi:apply(Handle, ?SERVICE_ID, on_compact, [DbName, DDocs], []).
-
-on_delete(DbName, Options) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    couch_epi:apply(Handle, ?SERVICE_ID, on_delete, [DbName, Options], []).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-with_pipe(Func, Args) ->
-    do_apply(Func, Args, [pipe]).
-
-do_apply(Func, Args, Opts) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
-
-maybe_handle(Func, Args, Default) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
-       no_decision when is_function(Default) ->
-           apply(Default, Args);
-       no_decision ->
-           Default;
-       {decided, Result} ->
-           Result
-    end.
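A hedged sketch of the decide/no_decision flow for validate_dbname, with a made-up fallback fun of arity 2 (it receives the same [DbName, Normalized] argument list that the plugin callbacks see):

    %% illustrative fallback applied only when no plugin decides
    Fallback = fun(DbName, _Normalized) ->
        couch_db:validate_dbname(DbName)
    end,
    case couch_db_plugin:validate_dbname(<<"mydb">>, <<"mydb">>, Fallback) of
        ok -> ok;
        {error, Reason} -> throw({invalid_dbname, Reason})
    end.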
diff --git a/src/couch/src/couch_db_split.erl b/src/couch/src/couch_db_split.erl
deleted file mode 100644
index 3a1f98d..0000000
--- a/src/couch/src/couch_db_split.erl
+++ /dev/null
@@ -1,503 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_split).
-
-
--export([
-    split/3,
-    copy_local_docs/3,
-    cleanup_target/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--define(DEFAULT_BUFFER_SIZE, 16777216).
-
-
--record(state, {
-    source_db,
-    source_uuid,
-    targets,
-    pickfun,
-    max_buffer_size = ?DEFAULT_BUFFER_SIZE,
-    hashfun
-}).
-
--record(target, {
-    db,
-    uuid,
-    buffer = [],
-    buffer_size = 0
-}).
-
--record(racc, {
-    id,
-    source_db,
-    target_db,
-    active = 0,
-    external = 0,
-    atts = []
-}).
-
-
-% Public API
-
-split(Source, #{} = Targets, PickFun) when
-        map_size(Targets) >= 2, is_function(PickFun, 3) ->
-    case couch_db:open_int(Source, [?ADMIN_CTX]) of
-        {ok, SourceDb} ->
-            Engine = get_engine(SourceDb),
-            Partitioned = couch_db:is_partitioned(SourceDb),
-            HashFun = mem3_hash:get_hash_fun(couch_db:name(SourceDb)),
-            try
-                split(SourceDb, Partitioned, Engine, Targets, PickFun, HashFun)
-            catch
-                throw:{target_create_error, DbName, Error, TargetDbs} ->
-                    cleanup_targets(TargetDbs, Engine),
-                    {error, {target_create_error, DbName, Error}}
-            after
-                couch_db:close(SourceDb)
-            end;
-        {not_found, _} ->
-            {error, missing_source}
-    end.
-
-
-copy_local_docs(Source, #{} = Targets0, PickFun) when
-        is_binary(Source), is_function(PickFun, 3) ->
-    case couch_db:open_int(Source, [?ADMIN_CTX]) of
-        {ok, SourceDb} ->
-            try
-                Targets = maps:map(fun(_, DbName) ->
-                    {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
-                    #target{db = Db, uuid = couch_db:get_uuid(Db)}
-                end, Targets0),
-                SourceName = couch_db:name(SourceDb),
-                try
-                    State = #state{
-                        source_db = SourceDb,
-                        source_uuid = couch_db:get_uuid(SourceDb),
-                        targets = Targets,
-                        pickfun = PickFun,
-                        hashfun = mem3_hash:get_hash_fun(SourceName)
-                    },
-                    copy_local_docs(State),
-                    ok
-                after
-                    maps:map(fun(_, #target{db = Db} = T) ->
-                        couch_db:close(Db),
-                        T#target{db = undefined}
-                    end, Targets)
-                end
-            after
-                couch_db:close(SourceDb)
-            end;
-        {not_found, _} ->
-            {error, missing_source}
-    end.
-
-
-cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) ->
-    case couch_db:open_int(Source, [?ADMIN_CTX]) of
-        {ok, SourceDb} ->
-            try
-                delete_target(Target, get_engine(SourceDb))
-            after
-                couch_db:close(SourceDb)
-            end;
-        {not_found, _} ->
-            {error, missing_source}
-    end.
-
-
-% Private Functions
-
-split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) ->
-    Targets = maps:fold(fun(Key, DbName, Map) ->
-        case couch_db:validate_dbname(DbName) of
-            ok ->
-                ok;
-            {error, E} ->
-                throw({target_create_error, DbName, E, Map})
-        end,
-        case couch_server:lock(DbName, <<"shard splitting">>) of
-            ok ->
-                ok;
-            {error, Err} ->
-                throw({target_create_error, DbName, Err, Map})
-        end,
-        {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
-        Opts = [create, ?ADMIN_CTX] ++ case Partitioned of
-            true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}];
-            false -> []
-        end,
-        case couch_db:start_link(Engine, DbName, Filepath, Opts) of
-            {ok, Db} ->
-                Map#{Key => #target{db = Db}};
-            {error, Error} ->
-                throw({target_create_error, DbName, Error, Map})
-        end
-    end, #{}, Targets0),
-    Seq = couch_db:get_update_seq(SourceDb),
-    State1 = #state{
-        source_db = SourceDb,
-        targets = Targets,
-        pickfun = PickFun,
-        hashfun = HashFun,
-        max_buffer_size = get_max_buffer_size()
-    },
-    State2 = copy_docs(State1),
-    State3 = copy_checkpoints(State2),
-    State4 = copy_meta(State3),
-    State5 = copy_purge_info(State4),
-    State6 = set_targets_update_seq(State5),
-    stop_targets(State6#state.targets),
-    {ok, Seq}.
-
-
-cleanup_targets(#{} = Targets, Engine) ->
-    maps:map(fun(_, #target{db = Db} = T) ->
-        ok = stop_target_db(Db),
-        DbName = couch_db:name(Db),
-        delete_target(DbName, Engine),
-        couch_server:unlock(DbName),
-        T
-    end, Targets).
-
-
-stop_targets(#{} = Targets) ->
-    maps:map(fun(_, #target{db = Db} = T) ->
-        {ok, Db1} = couch_db_engine:commit_data(Db),
-        ok = stop_target_db(Db1),
-        T
-    end, Targets).
-
-
-stop_target_db(Db) ->
-    couch_db:close(Db),
-    Pid = couch_db:get_pid(Db),
-    catch unlink(Pid),
-    catch exit(Pid, kill),
-    couch_server:unlock(couch_db:name(Db)),
-    ok.
-
-
-delete_target(DbName, Engine) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
-    DelOpt = [{context, compaction}, sync],
-    couch_db_engine:delete(Engine, RootDir, Filepath, DelOpt).
-
-
-pick_target(DocId, #state{} = State, #{} = Targets) ->
-    #state{pickfun = PickFun, hashfun = HashFun} = State,
-    Key = PickFun(DocId, maps:keys(Targets), HashFun),
-    {Key, maps:get(Key, Targets)}.
-
-
-set_targets_update_seq(#state{targets = Targets} = State) ->
-    Seq = couch_db:get_update_seq(State#state.source_db),
-    Targets1 = maps:map(fun(_, #target{db = Db} = Target) ->
-        {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq),
-        Target#target{db = Db1}
-    end, Targets),
-    State#state{targets = Targets1}.
-
-
-copy_checkpoints(#state{} = State) ->
-    #state{source_db = Db, source_uuid = SrcUUID, targets = Targets} = State,
-    FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
-        UpdatedAcc = case Id of
-            <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
-                % Transform mem3 internal replicator checkpoints to avoid
-                % rewinding the changes feed when it sees the new shards
-                maps:map(fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) ->
-                    Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc),
-                    T#target{buffer = [Doc1 | Docs]}
-                end, Acc);
-            <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
-                % Copy purge checkpoints to all shards
-                maps:map(fun(_, #target{buffer = Docs} = T) ->
-                    T#target{buffer = [Doc | Docs]}
-                end, Acc);
-            <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                % Skip copying these; they are handled during the
-                % local docs top-off right before the shards are switched
-                Acc
-        end,
-        {ok, UpdatedAcc}
-    end,
-    {ok, Targets1} = couch_db_engine:fold_local_docs(Db, FoldFun, Targets, []),
-    Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
-        case Docs of
-            [] ->
-                T;
-            [_ | _] ->
-                Docs1 = lists:reverse(Docs),
-                {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1),
-                {ok, TDb2} = couch_db_engine:commit_data(TDb1),
-                T#target{db = TDb2, buffer = []}
-        end
-    end, Targets1),
-    State#state{targets = Targets2}.
-
-
-update_checkpoint_doc(Old, New, #doc{body = {Props}} = Doc) ->
-    NewProps = case couch_util:get_value(<<"target_uuid">>, Props) of
-        Old ->
-            replace_kv(Props, {<<"target_uuid">>, Old, New});
-        Other when is_binary(Other) ->
-            replace_kv(Props, {<<"source_uuid">>, Old, New})
-    end,
-    NewId = update_checkpoint_id(Doc#doc.id, Old, New),
-    Doc#doc{id = NewId, body = {NewProps}}.
-
-
-update_checkpoint_id(Id, Old, New) ->
-    OldHash = mem3_rep:local_id_hash(Old),
-    NewHash = mem3_rep:local_id_hash(New),
-    binary:replace(Id, OldHash, NewHash).
-
-
-replace_kv({[]}, _) ->
-    {[]};
-replace_kv({KVs}, Replacement) ->
-    {[replace_kv(KV, Replacement) || KV <- KVs]};
-replace_kv([], _) ->
-    [];
-replace_kv(List, Replacement) when is_list(List) ->
-    [replace_kv(V, Replacement) || V <- List];
-replace_kv({K, V}, {K, V, NewV}) ->
-    {K, NewV};
-replace_kv({K, V}, Replacement) ->
-    {K, replace_kv(V, Replacement)};
-replace_kv(V, _) ->
-    V.
-
-
-copy_meta(#state{source_db = SourceDb, targets = Targets} = State) ->
-    RevsLimit = couch_db:get_revs_limit(SourceDb),
-    {SecProps} = couch_db:get_security(SourceDb),
-    PurgeLimit = couch_db:get_purge_infos_limit(SourceDb),
-    Targets1 = maps:map(fun(_, #target{db = Db} = T) ->
-        {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit),
-        {ok, Db2} = couch_db_engine:set_security(Db1, SecProps),
-        {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit),
-        T#target{db = Db3}
-    end, Targets),
-    State#state{targets = Targets1}.
-
-
-copy_purge_info(#state{source_db = Db} = State) ->
-    {ok, NewState} = couch_db:fold_purge_infos(Db, 0, fun purge_cb/2, State),
-    Targets = maps:map(fun(_, #target{} = T) ->
-        commit_purge_infos(T)
-    end, NewState#state.targets),
-    NewState#state{targets = Targets}.
-
-
-acc_and_flush(Item, #target{}= Target, MaxBuffer, FlushCb) ->
-    #target{buffer = Buffer, buffer_size = BSize} = Target,
-    BSize1 = BSize + ?term_size(Item),
-    Target1 = Target#target{buffer = [Item | Buffer], buffer_size = BSize1},
-    case BSize1 > MaxBuffer of
-        true -> FlushCb(Target1);
-        false -> Target1
-    end.
-
-
-purge_cb({_PSeq, _UUID, Id, _Revs} = PI, #state{targets = Targets} = State) ->
-    {Key, Target} = pick_target(Id, State, Targets),
-    MaxBuffer = State#state.max_buffer_size,
-    Target1 = acc_and_flush(PI, Target, MaxBuffer, fun commit_purge_infos/1),
-    {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-
-commit_purge_infos(#target{buffer = [], db = Db} = Target) ->
-    Target#target{db = Db};
-
-commit_purge_infos(#target{buffer = PIs0, db = Db} = Target) ->
-    PIs = lists:reverse(PIs0),
-    {ok, Db1} = couch_db_engine:copy_purge_infos(Db, PIs),
-    {ok, Db2} = couch_db_engine:commit_data(Db1),
-    Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-
-copy_docs(#state{source_db = Db} = State) ->
-    {ok, NewState} = couch_db:fold_changes(Db, 0, fun changes_cb/2, State),
-    CommitTargets = maps:map(fun(_, #target{} = T) ->
-        commit_docs(T)
-    end, NewState#state.targets),
-    NewState#state{targets = CommitTargets}.
-
-
-% Backwards compatibility clause. Seq trees used to hold #doc_infos at one time
-changes_cb(#doc_info{id = Id}, #state{source_db = Db} = State) ->
-    [FDI = #full_doc_info{}] = couch_db_engine:open_docs(Db, [Id]),
-    changes_cb(FDI, State);
-
-changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) ->
-    #state{source_db = SourceDb, targets = Targets} = State,
-    {Key, Target} = pick_target(Id, State, Targets),
-    FDI1 = process_fdi(FDI, SourceDb, Target#target.db),
-    MaxBuffer = State#state.max_buffer_size,
-    Target1 = acc_and_flush(FDI1, Target, MaxBuffer, fun commit_docs/1),
-    {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-
-commit_docs(#target{buffer = [], db = Db} = Target) ->
-    Target#target{db = Db};
-
-commit_docs(#target{buffer = FDIs, db = Db} = Target) ->
-    Pairs = [{not_found, FDI} || FDI <- lists:reverse(FDIs)],
-    {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, []),
-    {ok, Db2} = couch_db_engine:commit_data(Db1),
-    Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-
-process_fdi(FDI, SourceDb, TargetDb) ->
-    #full_doc_info{id = Id, rev_tree = RTree} = FDI,
-    Acc = #racc{id = Id, source_db = SourceDb, target_db = TargetDb},
-    {NewRTree, NewAcc} = couch_key_tree:mapfold(fun revtree_cb/4, Acc, RTree),
-    {Active, External} = total_sizes(NewAcc),
-    FDI#full_doc_info{
-        rev_tree = NewRTree,
-        sizes = #size_info{active = Active, external = External}
-    }.
-
-
-revtree_cb(_Rev, _Leaf, branch, Acc) ->
-    {[], Acc};
-
-revtree_cb({Pos, RevId}, Leaf, leaf, Acc) ->
-    #racc{id = Id, source_db = SourceDb, target_db = TargetDb} = Acc,
-    #leaf{deleted = Deleted, ptr = Ptr, sizes = LeafSizes} = Leaf,
-    Doc0 = #doc{
-        id = Id,
-        revs = {Pos, [RevId]},
-        deleted = Deleted,
-        body = Ptr
-    },
-    Doc1 = couch_db_engine:read_doc_body(SourceDb, Doc0),
-    #doc{body = Body, atts = AttInfos0} = Doc1,
-    External = case LeafSizes#size_info.external of
-        0 when is_binary(Body) ->
-            couch_compress:uncompressed_size(Body);
-        0 ->
-            couch_ejson_size:encoded_size(Body);
-        N -> N
-    end,
-    AttInfos = if not is_binary(AttInfos0) -> AttInfos0; true ->
-        couch_compress:decompress(AttInfos0)
-    end,
-    Atts = [process_attachment(Att, SourceDb, TargetDb) || Att <- AttInfos],
-    Doc2 = Doc1#doc{atts = Atts},
-    Doc3 = couch_db_engine:serialize_doc(TargetDb, Doc2),
-    {ok, Doc4, Active} = couch_db_engine:write_doc_body(TargetDb, Doc3),
-    % element(3,...) and (4,...) are the stream pointer and size respectively
-    % (see couch_att.erl). They are numeric for compatibility with older formats
-    AttSizes = [{element(3, A), element(4, A)} || A <- Atts],
-    NewLeaf = Leaf#leaf{
-        ptr = Doc4#doc.body,
-        sizes = #size_info{active = Active, external = External},
-        atts = AttSizes
-    },
-    {NewLeaf, add_sizes(Active, External, AttSizes, Acc)}.
-
-
-% This is copied almost verbatim from the compactor
-process_attachment({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}, SourceDb,
-        TargetDb) ->
-    % 010 upgrade code
-    {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
-    {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
-    ok = couch_stream:copy(SrcStream, DstStream),
-    {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
-            couch_stream:close(DstStream),
-    {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-    couch_util:check_md5(ExpectedMd5, ActualMd5),
-    {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-
-process_attachment({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5,
-        Enc1}, SourceDb, TargetDb) ->
-    {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
-    {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
-    ok = couch_stream:copy(SrcStream, DstStream),
-    {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
-            couch_stream:close(DstStream),
-    {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-    couch_util:check_md5(ExpectedMd5, ActualMd5),
-    Enc = case Enc1 of
-        true -> gzip;  % 0110 upgrade code
-        false -> identity;  % 0110 upgrade code
-        _ -> Enc1
-    end,
-    {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}.
-
-
-get_engine(Db) ->
-    {ok, DbInfoProps} = couch_db:get_db_info(Db),
-    proplists:get_value(engine, DbInfoProps).
-
-
-add_sizes(Active, External, Atts, #racc{} = Acc) ->
-    #racc{active = ActiveAcc, external = ExternalAcc, atts = AttsAcc} = Acc,
-    NewActiveAcc = ActiveAcc + Active,
-    NewExternalAcc = ExternalAcc + External,
-    NewAttsAcc = lists:umerge(Atts, AttsAcc),
-    Acc#racc{
-        active = NewActiveAcc,
-        external = NewExternalAcc,
-        atts = NewAttsAcc
-    }.
-
-
-total_sizes(#racc{active = Active, external = External, atts = Atts}) ->
-    TotalAtts = lists:foldl(fun({_, S}, A) -> S + A end, 0, Atts),
-    {Active + TotalAtts, External + TotalAtts}.
-
-
-get_max_buffer_size() ->
-    config:get_integer("reshard", "split_buffer_size", ?DEFAULT_BUFFER_SIZE).
-
-
-copy_local_docs(#state{source_db = Db, targets = Targets} = State) ->
-    FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
-        UpdatedAcc = case Id of
-            <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
-                Acc;
-            <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
-                Acc;
-            <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                % Users' and replicator app's checkpoints go to their
-                % respective shards based on the general hashing algorithm
-                {Key, Target} = pick_target(Id, State, Acc),
-                #target{buffer = Docs} = Target,
-                Acc#{Key => Target#target{buffer = [Doc | Docs]}}
-        end,
-        {ok, UpdatedAcc}
-    end,
-    {ok, Targets1} = couch_db:fold_local_docs(Db, FoldFun, Targets, []),
-    Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
-        case Docs of
-            [] ->
-                T;
-            [_ | _] ->
-                Docs1 = lists:reverse(Docs),
-                {ok, _} = couch_db:update_docs(TDb, Docs1),
-                T#target{buffer = []}
-        end
-    end, Targets1),
-    State#state{targets = Targets2}.
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
deleted file mode 100644
index 1ca804c..0000000
--- a/src/couch/src/couch_db_updater.erl
+++ /dev/null
@@ -1,955 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_updater).
--behaviour(gen_server).
--vsn(1).
-
--export([add_sizes/3, upgrade_sizes/1]).
--export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(IDLE_LIMIT_DEFAULT, 61000).
--define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000). % 10 GiB
-
-
--record(merge_acc, {
-    revs_limit,
-    merge_conflicts,
-    add_infos = [],
-    rem_seqs = [],
-    cur_seq,
-    full_partitions = []
-}).
-
-
-init({Engine, DbName, FilePath, Options0}) ->
-    erlang:put(io_priority, {db_update, DbName}),
-    update_idle_limit_from_config(),
-    DefaultSecObj = default_security_object(DbName),
-    Options = [{default_security_object, DefaultSecObj} | Options0],
-    try
-        {ok, EngineState} = couch_db_engine:init(Engine, FilePath, Options),
-        Db = init_db(DbName, FilePath, EngineState, Options),
-        case lists:member(sys_db, Options) of
-            false ->
-                couch_stats_process_tracker:track([couchdb, open_databases]);
-            true ->
-                ok
-        end,
-        % Don't load validation funs here because the fabric query is
-        % liable to race conditions. Instead see
-        % couch_db:validate_doc_update, which loads them lazily.
-        NewDb = Db#db{main_pid = self()},
-        proc_lib:init_ack({ok, NewDb}),
-        gen_server:enter_loop(?MODULE, [], NewDb, idle_limit())
-    catch
-        throw:InitError ->
-            proc_lib:init_ack(InitError)
-    end.
-
-
-terminate(Reason, Db) ->
-    couch_util:shutdown_sync(Db#db.compactor_pid),
-    couch_db_engine:terminate(Reason, Db),
-    ok.
-
-handle_call(get_db, _From, Db) ->
-    {reply, {ok, Db}, Db, idle_limit()};
-handle_call(start_compact, _From, Db) ->
-    {noreply, NewDb, _Timeout} = handle_cast(start_compact, Db),
-    {reply, {ok, NewDb#db.compactor_pid}, NewDb, idle_limit()};
-handle_call(compactor_pid, _From, #db{compactor_pid = Pid} = Db) ->
-    {reply, Pid, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
-    {reply, ok, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
-    unlink(Pid),
-    exit(Pid, kill),
-    couch_server:delete_compaction_files(Db#db.name),
-    Db2 = Db#db{compactor_pid = nil},
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2, idle_limit()};
-
-handle_call({set_security, NewSec}, _From, #db{} = Db) ->
-    {ok, NewDb} = couch_db_engine:set_security(Db, NewSec),
-    NewSecDb = commit_data(NewDb#db{
-        security = NewSec
-    }),
-    ok = gen_server:call(couch_server, {db_updated, NewSecDb}, infinity),
-    {reply, ok, NewSecDb, idle_limit()};
-
-handle_call({set_revs_limit, Limit}, _From, Db) ->
-    {ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit),
-    Db3 = commit_data(Db2),
-    ok = gen_server:call(couch_server, {db_updated, Db3}, infinity),
-    {reply, ok, Db3, idle_limit()};
-
-handle_call({set_purge_infos_limit, Limit}, _From, Db) ->
-    {ok, Db2} = couch_db_engine:set_purge_infos_limit(Db, Limit),
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2, idle_limit()};
-
-handle_call({purge_docs, [], _}, _From, Db) ->
-    {reply, {ok, []}, Db, idle_limit()};
-
-handle_call({purge_docs, PurgeReqs0, Options}, _From, Db) ->
-    % Filter out any previously applied updates during
-    % internal replication
-    IsRepl = lists:member(replicated_changes, Options),
-    PurgeReqs = if not IsRepl -> PurgeReqs0; true ->
-        UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0],
-        PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs),
-        lists:flatmap(fun
-            ({not_found, PReq}) -> [PReq];
-            ({{_, _, _, _}, _}) -> []
-        end, lists:zip(PurgeInfos, PurgeReqs0))
-    end,
-    {ok, NewDb, Replies} = purge_docs(Db, PurgeReqs),
-    {reply, {ok, Replies}, NewDb, idle_limit()};
-
-handle_call(Msg, From, Db) ->
-    case couch_db_engine:handle_db_updater_call(Msg, From, Db) of
-        {reply, Resp, NewDb} ->
-            {reply, Resp, NewDb, idle_limit()};
-        Else ->
-            Else
-    end.
-
-
-handle_cast({load_validation_funs, ValidationFuns}, Db) ->
-    Db2 = Db#db{validate_doc_funs = ValidationFuns},
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {noreply, Db2, idle_limit()};
-handle_cast(start_compact, Db) ->
-    case Db#db.compactor_pid of
-        nil ->
-            % For now we only support compacting to the same
-            % storage engine. After the first round of patches
-            % we'll add a field that sets the target engine
-            % type to compact to with a new copy compactor.
-            UpdateSeq = couch_db_engine:get_update_seq(Db),
-            Args = [Db#db.name, UpdateSeq],
-            couch_log:info("Starting compaction for db \"~s\" at ~p", Args),
-            {ok, Db2} = couch_db_engine:start_compaction(Db),
-            ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-            {noreply, Db2, idle_limit()};
-        _ ->
-            % compact currently running, this is a no-op
-            {noreply, Db, idle_limit()}
-    end;
-handle_cast({compact_done, _Engine, CompactInfo}, #db{} = OldDb) ->
-    {ok, NewDb} = couch_db_engine:finish_compaction(OldDb, CompactInfo),
-    {noreply, NewDb};
-
-handle_cast(wakeup, Db) ->
-    {noreply, Db, idle_limit()};
-
-handle_cast(Msg, #db{name = Name} = Db) ->
-    couch_log:error("Database `~s` updater received unexpected cast: ~p",
-                    [Name, Msg]),
-    {stop, Msg, Db}.
-
-
-handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts},
-        Db) ->
-    GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
-    if NonRepDocs == [] ->
-        {GroupedDocs3, Clients} = collect_updates(GroupedDocs2,
-                [Client], MergeConflicts);
-    true ->
-        GroupedDocs3 = GroupedDocs2,
-        Clients = [Client]
-    end,
-    NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
-    try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts) of
-    {ok, Db2, UpdatedDDocIds} ->
-        ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-        case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of
-            {Seq, Seq} -> ok;
-            _ -> couch_event:notify(Db2#db.name, updated)
-        end,
-        if NonRepDocs2 /= [] ->
-            couch_event:notify(Db2#db.name, local_updated);
-        true -> ok
-        end,
-        [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
-        Db3 = case length(UpdatedDDocIds) > 0 of
-            true ->
-                % Ken and ddoc_cache are the only things that
-                % use the unspecified ddoc_updated message. We
-                % should update them to use the new message per
-                % ddoc.
-                lists:foreach(fun(DDocId) ->
-                    couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
-                end, UpdatedDDocIds),
-                couch_event:notify(Db2#db.name, ddoc_updated),
-                ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
-                refresh_validate_doc_funs(Db2);
-            false ->
-                Db2
-        end,
-        {noreply, Db3, hibernate_if_no_idle_limit()}
-    catch
-        throw: retry ->
-            [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
-            {noreply, Db, hibernate_if_no_idle_limit()}
-    end;
-handle_info({'EXIT', _Pid, normal}, Db) ->
-    {noreply, Db, idle_limit()};
-handle_info({'EXIT', _Pid, Reason}, Db) ->
-    {stop, Reason, Db};
-handle_info(timeout, #db{name=DbName} = Db) ->
-    IdleLimitMSec = update_idle_limit_from_config(),
-    case couch_db:is_idle(Db) of
-        true ->
-            LastActivity = couch_db_engine:last_activity(Db),
-            DtMSec = timer:now_diff(os:timestamp(), LastActivity) div 1000,
-            MSecSinceLastActivity = max(0, DtMSec),
-            case MSecSinceLastActivity > IdleLimitMSec of
-                true ->
-                    ok = couch_server:close_db_if_idle(DbName);
-                false ->
-                    ok
-            end;
-        false ->
-            ok
-    end,
-    % Send a message to wake up and then hibernate. Hibernation here is done to
-    % force a thorough garbage collection.
-    gen_server:cast(self(), wakeup),
-    {noreply, Db, hibernate};
-
-handle_info(Msg, Db) ->
-    case couch_db_engine:handle_db_updater_info(Msg, Db) of
-        {noreply, NewDb} ->
-            {noreply, NewDb, idle_limit()};
-        Else ->
-            Else
-    end.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-sort_and_tag_grouped_docs(Client, GroupedDocs) ->
-    % These groups should already be sorted but sometimes clients misbehave.
-    % The merge_updates function will fail and the database can end up with
-    % duplicate documents if the incoming groups are not sorted, so as a sanity
-    % check we sort them again here. See COUCHDB-2735.
-    Cmp = fun([#doc{id=A}|_], [#doc{id=B}|_]) -> A < B end,
-    lists:map(fun(DocGroup) ->
-        [{Client, maybe_tag_doc(D)} || D <- DocGroup]
-    end, lists:sort(Cmp, GroupedDocs)).
-
-maybe_tag_doc(#doc{id=Id, revs={Pos,[_Rev|PrevRevs]}, meta=Meta0}=Doc) ->
-    case lists:keymember(ref, 1, Meta0) of
-        true ->
-            Doc;
-        false ->
-            Key = {Id, {Pos-1, PrevRevs}},
-            Doc#doc{meta=[{ref, Key} | Meta0]}
-    end.
-
-merge_updates([[{_,#doc{id=X}}|_]=A|RestA], [[{_,#doc{id=X}}|_]=B|RestB]) ->
-    [A++B | merge_updates(RestA, RestB)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X < Y ->
-    [hd(A) | merge_updates(tl(A), B)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X > Y ->
-    [hd(B) | merge_updates(A, tl(B))];
-merge_updates([], RestB) ->
-    RestB;
-merge_updates(RestA, []) ->
-    RestA.
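%% For illustration, merge_updates/2 interleaves two update batches that are
%% already sorted by document id and appends groups that target the same id.
%% A hypothetical call, with c1/c2 standing in for client pids:
%%
%%   merge_updates(
%%       [[{c1, #doc{id = <<"a">>}}], [{c1, #doc{id = <<"c">>}}]],
%%       [[{c2, #doc{id = <<"b">>}}]])
%%   %% => [[{c1, #doc{id = <<"a">>}}],
%%   %%     [{c2, #doc{id = <<"b">>}}],
%%   %%     [{c1, #doc{id = <<"c">>}}]]
%%
%% Two groups for the same id, e.g. <<"a">> from both clients, would instead
%% be appended into a single group by the first clause.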
-
-collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts) ->
-    receive
-        % Only collect updates with the same MergeConflicts flag and without
-        % local docs. It's easier to just avoid multiple _local doc
-        % updaters than deal with their possible conflicts, and local doc
-        % writes are relatively rare. Can be optimized later if really needed.
-        {update_docs, Client, GroupedDocs, [], MergeConflicts} ->
-            GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
-            GroupedDocsAcc2 =
-                merge_updates(GroupedDocsAcc, GroupedDocs2),
-            collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
-                    MergeConflicts)
-    after 0 ->
-        {GroupedDocsAcc, ClientsAcc}
-    end.
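%% A minimal sketch of the batching pattern used by collect_updates/3: drain
%% whatever matching messages are already in the mailbox without blocking, via
%% a receive with an `after 0` clause (illustrative names, not the real code):
drain_pending(Acc) ->
    receive
        {update, Item} ->
            drain_pending([Item | Acc])
    after 0 ->
        lists:reverse(Acc)
    end.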
-
-
-init_db(DbName, FilePath, EngineState, Options) ->
-    % convert start time tuple to microsecs and store as a binary string
-    {MegaSecs, Secs, MicroSecs} = os:timestamp(),
-    StartTime = ?l2b(io_lib:format("~p",
-            [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
-
-    BDU = couch_util:get_value(before_doc_update, Options, nil),
-    ADR = couch_util:get_value(after_doc_read, Options, nil),
-
-    NonCreateOpts = [Opt || Opt <- Options, Opt /= create],
-
-    InitDb = #db{
-        name = DbName,
-        filepath = FilePath,
-        engine = EngineState,
-        instance_start_time = StartTime,
-        options = NonCreateOpts,
-        before_doc_update = BDU,
-        after_doc_read = ADR
-    },
-
-    DbProps = couch_db_engine:get_props(InitDb),
-
-    InitDb#db{
-        committed_update_seq = couch_db_engine:get_update_seq(InitDb),
-        security = couch_db_engine:get_security(InitDb),
-        options = lists:keystore(props, 1, NonCreateOpts, {props, DbProps})
-    }.
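%% A minimal sketch of the start-time encoding used in init_db/4 above: an
%% os:timestamp() triple is collapsed into microseconds and rendered as a
%% binary (hypothetical helper; ?l2b in the original is list_to_binary):
timestamp_to_binary({MegaSecs, Secs, MicroSecs}) ->
    Micros = MegaSecs * 1000000 * 1000000 + Secs * 1000000 + MicroSecs,
    list_to_binary(io_lib:format("~p", [Micros])).
%% e.g. timestamp_to_binary({1618, 612345, 678901}) => <<"1618612345678901">>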
-
-
-refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) ->
-    spawn(fabric, reset_validation_funs, [mem3:dbname(Name)]),
-    Db#db{validate_doc_funs = undefined};
-refresh_validate_doc_funs(Db0) ->
-    Db = Db0#db{user_ctx=?ADMIN_USER},
-    {ok, DesignDocs} = couch_db:get_design_docs(Db),
-    ProcessDocFuns = lists:flatmap(
-        fun(DesignDocInfo) ->
-            {ok, DesignDoc} = couch_db:open_doc_int(
-                Db, DesignDocInfo, [ejson_body]),
-            case couch_doc:get_validate_doc_fun(DesignDoc) of
-            nil -> [];
-            Fun -> [Fun]
-            end
-        end, DesignDocs),
-    Db#db{validate_doc_funs=ProcessDocFuns}.
-
-% rev tree functions
-
-flush_trees(_Db, [], AccFlushedTrees) ->
-    {ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{} = Db,
-        [InfoUnflushed | RestUnflushed], AccFlushed) ->
-    #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
-    {Flushed, FinalAcc} = couch_key_tree:mapfold(
-        fun(_Rev, Value, Type, SizesAcc) ->
-            case Value of
-                % This node is a document summary that needs to be
-                % flushed to disk.
-                #doc{} = Doc ->
-                    check_doc_atts(Db, Doc),
-                    ExternalSize = get_meta_body_size(Value#doc.meta),
-                    {size_info, AttSizeInfo} =
-                            lists:keyfind(size_info, 1, Doc#doc.meta),
-                    {ok, NewDoc, WrittenSize} =
-                            couch_db_engine:write_doc_body(Db, Doc),
-                    Leaf = #leaf{
-                        deleted = Doc#doc.deleted,
-                        ptr = NewDoc#doc.body,
-                        seq = UpdateSeq,
-                        sizes = #size_info{
-                            active = WrittenSize,
-                            external = ExternalSize
-                        },
-                        atts = AttSizeInfo
-                    },
-                    {Leaf, add_sizes(Type, Leaf, SizesAcc)};
-                #leaf{} ->
-                    {Value, add_sizes(Type, Value, SizesAcc)};
-                _ ->
-                    {Value, SizesAcc}
-            end
-        end, {0, 0, []}, Unflushed),
-    {FinalAS, FinalES, FinalAtts} = FinalAcc,
-    TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
-    NewInfo = InfoUnflushed#full_doc_info{
-        rev_tree = Flushed,
-        sizes = #size_info{
-            active = FinalAS + TotalAttSize,
-            external = FinalES + TotalAttSize
-        }
-    },
-    flush_trees(Db, RestUnflushed, [NewInfo | AccFlushed]).
-
-
-check_doc_atts(Db, Doc) ->
-    {atts_stream, Stream} = lists:keyfind(atts_stream, 1, Doc#doc.meta),
-    % Make sure that the attachments were written to the currently
-    % active attachment stream. If compaction swaps during a write
-    % request we may have to rewrite our attachment bodies.
-    if Stream == nil -> ok; true ->
-        case couch_db:is_active_stream(Db, Stream) of
-            true ->
-                ok;
-            false ->
-                % Stream where the attachments were written to is
-                % no longer the current attachment stream. This
-                % can happen when a database is switched at
-                % compaction time.
-                couch_log:debug("Stream where the attachments were"
-                                " written has changed."
-                                " Possibly retrying.", []),
-                throw(retry)
-        end
-    end.
-
-
-add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) ->
-    % Maybe upgrade from disk_size only
-    #size_info{
-        active = ActiveSize,
-        external = ExternalSize
-    } = upgrade_sizes(Sizes),
-    {ASAcc, ESAcc, AttsAcc} = Acc,
-    NewASAcc = ActiveSize + ASAcc,
-    NewESAcc = ESAcc + if Type == leaf -> ExternalSize; true -> 0 end,
-    NewAttsAcc = lists:umerge(AttSizes, AttsAcc),
-    {NewASAcc, NewESAcc, NewAttsAcc}.
-
-
-upgrade_sizes(#size_info{}=SI) ->
-    SI;
-upgrade_sizes({D, E}) ->
-    #size_info{active=D, external=E};
-upgrade_sizes(S) when is_integer(S) ->
-    #size_info{active=S, external=0}.
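%% A hypothetical check of the three shapes upgrade_sizes/1 accepts, assuming
%% the #size_info{} record definition is in scope (values are made up):
upgrade_sizes_sketch() ->
    SI = #size_info{active = 10, external = 20},
    SI = upgrade_sizes(SI),                       % already a #size_info{} record
    SI = upgrade_sizes({10, 20}),                 % legacy {Active, External} tuple
    #size_info{active = 10, external = 0} = upgrade_sizes(10),  % legacy bare disk size
    ok.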
-
-
... 68054 lines suppressed ...