Posted to commits@couchdb.apache.org by va...@apache.org on 2021/04/16 21:45:05 UTC

[couchdb] branch main updated (403d27b -> 3217974)

This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a change to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


    from 403d27b  Add docker creds to CI run (#3508) (#3509)
     new f9f7f21  Delete non-functional 3.x applications and modules from main
     new ba6819b  Clean up config files
     new e05a6bf  Close backend port and clean up url handlers
     new 1c3ed04  Update couch_primary_sup to not start couch_task_status child
     new 41fa9a7  Remove commented out tests from couch_att
     new 622285d  Update couch_secondary_sup to not start index_server
     new e909cdb  Clean up couch_debug
     new d317624  Update couch_flags to remove knowledge about shards
     new e31ae8d  Clean up couch_doc
     new 9e4fc19  Remove most of the functionality from couch_server
     new cc32e04  Update couch_(js_)os_process after ioq removal
     new 93cd8b5  Remove rewrite support from couch_js and couch_query_servers
     new 9ac2ae5  Update couch_util to remove couch_db and mem3 calls
     new c9e19fb  Remove couch_db_plugin from couch_db_epi services
     new 7b83445  Clean up couch_db.hrl
     new 3080cf5  Remove clouseau and dreyfus references from mango
     new 5ec2119  Move utilities and records from couch_mrview and couch_index to couch_views
     new 5b39839  Remove mem3_sync:get_backlog/0 call from stats in chttpd_node
     new 45de516  Clean up database name validation in fabric2_db
     new c0dba42  Clean up couch_auth_cache
     new f005aba  Update all the applications to use the new couch_views utility functions
     new 870ba4c  Clean up tests after removing 3.x applications and couch_views updates
     new 3f9894f  Update ./dev/run to not auto-create _global_changes
     new 3217974  Clean up Makefiles and start running all the unit tests

The 24 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 Makefile                                           |    6 +-
 Makefile.win                                       |    2 +-
 dev/run                                            |    2 +-
 emilio.config                                      |    4 +-
 mix.exs                                            |    5 +-
 rebar.config.script                                |   17 -
 rel/apps/couch_epi.config                          |    7 +-
 rel/overlay/etc/default.ini                        |  215 +-
 rel/overlay/etc/local.ini                          |   13 -
 rel/reltool.config                                 |   32 -
 src/chttpd/src/chttpd.erl                          |    7 +-
 src/chttpd/src/chttpd_changes.erl                  |    6 +-
 src/chttpd/src/chttpd_db.erl                       |  158 +-
 src/chttpd/src/chttpd_httpd_handlers.erl           |   25 +-
 src/chttpd/src/chttpd_misc.erl                     |   49 +-
 src/chttpd/src/chttpd_node.erl                     |   47 +-
 src/chttpd/src/chttpd_rewrite.erl                  |  487 -----
 src/chttpd/src/chttpd_show.erl                     |  154 +-
 src/chttpd/src/chttpd_util.erl                     |   41 +
 src/chttpd/src/chttpd_view.erl                     |   24 +-
 src/couch/include/couch_db.hrl                     |   44 -
 src/couch/src/couch.app.src                        |   41 +-
 src/couch/src/couch_att.erl                        |  189 --
 src/couch/src/couch_auth_cache.erl                 |   93 +-
 src/couch/src/couch_bt_engine.erl                  | 1246 ------------
 src/couch/src/couch_bt_engine.hrl                  |   27 -
 src/couch/src/couch_bt_engine_compactor.erl        |  590 ------
 src/couch/src/couch_bt_engine_header.erl           |  451 -----
 src/couch/src/couch_bt_engine_stream.erl           |   70 -
 src/couch/src/couch_btree.erl                      |  855 --------
 src/couch/src/couch_changes.erl                    |  724 -------
 src/couch/src/couch_compress.erl                   |   99 -
 src/couch/src/couch_db.erl                         | 2086 --------------------
 src/couch/src/couch_db_engine.erl                  | 1105 -----------
 src/couch/src/couch_db_epi.erl                     |    1 -
 src/couch/src/couch_db_header.erl                  |  405 ----
 src/couch/src/couch_db_int.hrl                     |   76 -
 src/couch/src/couch_db_plugin.erl                  |   96 -
 src/couch/src/couch_db_split.erl                   |  503 -----
 src/couch/src/couch_db_updater.erl                 |  955 ---------
 src/couch/src/couch_debug.erl                      |   38 -
 src/couch/src/couch_doc.erl                        |   59 +-
 src/couch/src/couch_emsort.erl                     |  318 ---
 src/couch/src/couch_event_sup.erl                  |   74 -
 src/couch/src/couch_file.erl                       |  804 --------
 src/couch/src/couch_flags.erl                      |   16 +-
 src/couch/src/couch_httpd.erl                      |  347 +---
 src/couch/src/couch_httpd_db.erl                   | 1263 ------------
 src/couch/src/couch_httpd_misc_handlers.erl        |  269 ---
 src/couch/src/couch_httpd_rewrite.erl              |  484 -----
 src/couch/src/couch_lru.erl                        |   67 -
 src/couch/src/couch_multidb_changes.erl            |  903 ---------
 src/couch/src/couch_os_process.erl                 |    2 +-
 src/couch/src/couch_partition.erl                  |    2 +-
 src/couch/src/couch_primary_sup.erl                |    6 -
 src/couch/src/couch_query_servers.erl              |   79 -
 src/couch/src/couch_secondary_sup.erl              |   11 +-
 src/couch/src/couch_server.erl                     |  872 +-------
 src/couch/src/couch_server_int.hrl                 |   23 -
 src/couch/src/couch_stream.erl                     |  322 ---
 src/couch/src/couch_task_status.erl                |  171 --
 src/couch/src/couch_users_db.erl                   |  137 --
 src/couch/src/couch_util.erl                       |   26 +-
 src/couch/src/test_util.erl                        |   42 +-
 src/couch/test/eunit/chttpd_endpoints_tests.erl    |   18 +-
 src/couch/test/eunit/couch_auth_cache_tests.erl    |  349 ----
 .../test/eunit/couch_bt_engine_compactor_tests.erl |  129 --
 src/couch/test/eunit/couch_bt_engine_tests.erl     |   20 -
 .../test/eunit/couch_bt_engine_upgrade_tests.erl   |  244 ---
 src/couch/test/eunit/couch_btree_tests.erl         |  572 ------
 src/couch/test/eunit/couch_changes_tests.erl       |  962 ---------
 src/couch/test/eunit/couch_db_doc_tests.erl        |  121 --
 src/couch/test/eunit/couch_db_mpr_tests.erl        |   12 +-
 src/couch/test/eunit/couch_db_plugin_tests.erl     |  205 --
 .../test/eunit/couch_db_props_upgrade_tests.erl    |   83 -
 src/couch/test/eunit/couch_db_split_tests.erl      |  331 ----
 src/couch/test/eunit/couch_db_tests.erl            |  198 --
 src/couch/test/eunit/couch_doc_json_tests.erl      |   82 +-
 src/couch/test/eunit/couch_doc_tests.erl           |   45 +-
 src/couch/test/eunit/couch_file_tests.erl          |  551 ------
 src/couch/test/eunit/couch_index_tests.erl         |  232 ---
 src/couch/test/eunit/couch_query_servers_tests.erl |    2 +-
 src/couch/test/eunit/couch_server_tests.erl        |  294 ---
 src/couch/test/eunit/couch_stream_tests.erl        |  124 --
 src/couch/test/eunit/couch_task_status_tests.erl   |  233 ---
 src/couch/test/eunit/couchdb_attachments_tests.erl |  765 -------
 src/couch/test/eunit/couchdb_auth_tests.erl        |   11 +-
 src/couch/test/eunit/couchdb_cors_tests.erl        |    9 +-
 src/couch/test/eunit/couchdb_db_tests.erl          |   91 -
 src/couch/test/eunit/couchdb_design_doc_tests.erl  |   87 -
 .../test/eunit/couchdb_file_compression_tests.erl  |  250 ---
 .../test/eunit/couchdb_location_header_tests.erl   |   78 -
 src/couch/test/eunit/couchdb_mrview_cors_tests.erl |   18 +-
 src/couch/test/eunit/couchdb_mrview_tests.erl      |  261 ---
 .../test/eunit/couchdb_update_conflicts_tests.erl  |  280 ---
 src/couch/test/eunit/couchdb_vhosts_tests.erl      |  271 ---
 src/couch/test/eunit/couchdb_views_tests.erl       |  668 -------
 .../test/eunit/fixtures/os_daemon_configer.escript |    3 +-
 src/couch/test/eunit/global_changes_tests.erl      |  159 --
 src/couch/test/exunit/couch_compress_tests.exs     |  113 --
 src/couch/test/exunit/fabric_test.exs              |  101 -
 src/couch_eval/src/couch_eval.erl                  |    6 +-
 src/couch_event/.gitignore                         |    2 -
 src/couch_event/LICENSE                            |  202 --
 src/couch_event/README.md                          |    3 -
 src/couch_event/rebar.config                       |    1 -
 src/couch_event/src/couch_event.app.src            |   22 -
 src/couch_event/src/couch_event.erl                |   65 -
 src/couch_event/src/couch_event_app.erl            |   27 -
 src/couch_event/src/couch_event_int.hrl            |   19 -
 src/couch_event/src/couch_event_listener.erl       |  238 ---
 src/couch_event/src/couch_event_listener_mfa.erl   |  107 -
 src/couch_event/src/couch_event_os_listener.erl    |   76 -
 src/couch_event/src/couch_event_server.erl         |  156 --
 src/couch_event/src/couch_event_sup2.erl           |   44 -
 src/couch_index/.gitignore                         |    3 -
 src/couch_index/LICENSE                            |  202 --
 src/couch_index/rebar.config                       |    2 -
 src/couch_index/src/couch_index.app.src            |   19 -
 src/couch_index/src/couch_index.erl                |  639 ------
 src/couch_index/src/couch_index_app.erl            |   21 -
 src/couch_index/src/couch_index_compactor.erl      |  135 --
 src/couch_index/src/couch_index_epi.erl            |   50 -
 src/couch_index/src/couch_index_plugin.erl         |   51 -
 .../src/couch_index_plugin_couch_db.erl            |   26 -
 src/couch_index/src/couch_index_server.erl         |  303 ---
 src/couch_index/src/couch_index_sup.erl            |   24 -
 src/couch_index/src/couch_index_updater.erl        |  239 ---
 src/couch_index/src/couch_index_util.erl           |   78 -
 .../test/eunit/couch_index_compaction_tests.erl    |  117 --
 .../test/eunit/couch_index_ddoc_updated_tests.erl  |  145 --
 src/couch_js/src/couch_js.app.src                  |    3 +-
 src/couch_js/src/couch_js_os_process.erl           |    2 +-
 src/couch_js/src/couch_js_query_servers.erl        |   80 -
 src/couch_mrview/LICENSE                           |  202 --
 src/couch_mrview/include/couch_mrview.hrl          |  114 --
 src/couch_mrview/priv/stats_descriptions.cfg       |   24 -
 src/couch_mrview/rebar.config                      |    2 -
 src/couch_mrview/src/couch_mrview.app.src          |   18 -
 src/couch_mrview/src/couch_mrview.erl              |  692 -------
 src/couch_mrview/src/couch_mrview_cleanup.erl      |   59 -
 src/couch_mrview/src/couch_mrview_compactor.erl    |  294 ---
 src/couch_mrview/src/couch_mrview_index.erl        |  329 ---
 src/couch_mrview/src/couch_mrview_show.erl         |  468 -----
 src/couch_mrview/src/couch_mrview_test_util.erl    |  123 --
 .../src/couch_mrview_update_notifier.erl           |   49 -
 src/couch_mrview/src/couch_mrview_updater.erl      |  373 ----
 src/couch_mrview/src/couch_mrview_util.erl         | 1180 -----------
 .../test/eunit/couch_mrview_all_docs_tests.erl     |  140 --
 .../test/eunit/couch_mrview_collation_tests.erl    |  207 --
 .../test/eunit/couch_mrview_compact_tests.erl      |  115 --
 .../test/eunit/couch_mrview_ddoc_updated_tests.erl |  145 --
 .../eunit/couch_mrview_ddoc_validation_tests.erl   |  422 ----
 .../test/eunit/couch_mrview_design_docs_tests.erl  |  136 --
 .../test/eunit/couch_mrview_http_tests.erl         |   28 -
 .../test/eunit/couch_mrview_index_info_tests.erl   |  111 --
 .../test/eunit/couch_mrview_local_docs_tests.erl   |  148 --
 .../test/eunit/couch_mrview_map_views_tests.erl    |  144 --
 .../eunit/couch_mrview_purge_docs_fabric_tests.erl |  286 ---
 .../test/eunit/couch_mrview_purge_docs_tests.erl   |  575 ------
 .../test/eunit/couch_mrview_red_views_tests.erl    |   95 -
 .../test/eunit/couch_mrview_util_tests.erl         |   39 -
 src/couch_peruser/.gitignore                       |    9 -
 src/couch_peruser/LICENSE                          |  202 --
 src/couch_peruser/README.md                        |   34 -
 src/couch_peruser/src/couch_peruser.app.src        |   20 -
 src/couch_peruser/src/couch_peruser.erl            |  423 ----
 src/couch_peruser/src/couch_peruser_app.erl        |   26 -
 src/couch_peruser/src/couch_peruser_sup.erl        |   29 -
 .../test/eunit/couch_peruser_test.erl              |  538 -----
 src/couch_plugins/LICENSE                          |  202 --
 src/couch_plugins/Makefile.am                      |   40 -
 src/couch_plugins/README.md                        |  159 --
 src/couch_plugins/src/couch_plugins.app.src        |   22 -
 src/couch_plugins/src/couch_plugins.erl            |  304 ---
 src/couch_plugins/src/couch_plugins_httpd.erl      |   65 -
 src/couch_pse_tests/src/couch_pse_tests.app.src    |   20 -
 src/couch_pse_tests/src/cpse_gather.erl            |   95 -
 src/couch_pse_tests/src/cpse_test_attachments.erl  |   99 -
 src/couch_pse_tests/src/cpse_test_compaction.erl   |  318 ---
 .../src/cpse_test_copy_purge_infos.erl             |   82 -
 src/couch_pse_tests/src/cpse_test_fold_changes.erl |  185 --
 src/couch_pse_tests/src/cpse_test_fold_docs.erl    |  400 ----
 .../src/cpse_test_fold_purge_infos.erl             |  167 --
 .../src/cpse_test_get_set_props.erl                |   95 -
 .../src/cpse_test_open_close_delete.erl            |   77 -
 .../src/cpse_test_purge_bad_checkpoints.erl        |   80 -
 src/couch_pse_tests/src/cpse_test_purge_docs.erl   |  464 -----
 .../src/cpse_test_purge_replication.erl            |  215 --
 src/couch_pse_tests/src/cpse_test_purge_seqs.erl   |  129 --
 .../src/cpse_test_read_write_docs.erl              |  311 ---
 src/couch_pse_tests/src/cpse_test_ref_counting.erl |  113 --
 src/couch_pse_tests/src/cpse_util.erl              |  677 -------
 .../src/couch_replicator_api_wrap.erl              |    2 +-
 src/couch_replicator/src/couch_replicator_ids.erl  |    2 +-
 src/couch_views/include/couch_views.hrl            |   94 +
 src/couch_views/src/couch_views.erl                |    7 +-
 src/couch_views/src/couch_views_batch.erl          |    2 +-
 src/couch_views/src/couch_views_batch_impl.erl     |    2 +-
 src/couch_views/src/couch_views_fdb.erl            |    1 -
 src/couch_views/src/couch_views_http.erl           |   32 +-
 .../src/couch_views_http_util.erl}                 |  308 +--
 src/couch_views/src/couch_views_indexer.erl        |    1 -
 src/couch_views/src/couch_views_jobs.erl           |    1 -
 src/couch_views/src/couch_views_reader.erl         |    5 +-
 src/couch_views/src/couch_views_trees.erl          |    1 -
 src/couch_views/src/couch_views_updater.erl        |    6 +-
 src/couch_views/src/couch_views_util.erl           |  105 +-
 src/couch_views/src/couch_views_validate.erl       |  460 +++++
 src/couch_views/test/couch_views_batch_test.erl    |    2 +-
 src/couch_views/test/couch_views_cleanup_test.erl  |    1 -
 .../test/couch_views_custom_red_test.erl           |    1 -
 src/couch_views/test/couch_views_indexer_test.erl  |    1 -
 src/couch_views/test/couch_views_info_test.erl     |    2 +-
 src/couch_views/test/couch_views_map_test.erl      |   22 -
 src/couch_views/test/couch_views_size_test.erl     |    3 +-
 .../test/couch_views_trace_index_test.erl          |    2 +-
 src/couch_views/test/couch_views_updater_test.erl  |    3 +-
 src/couch_views/test/couch_views_upgrade_test.erl  |    3 +-
 src/ddoc_cache/LICENSE                             |  202 --
 src/ddoc_cache/README.md                           |    4 -
 src/ddoc_cache/priv/stats_descriptions.cfg         |   12 -
 src/ddoc_cache/src/ddoc_cache.app.src              |   32 -
 src/ddoc_cache/src/ddoc_cache.erl                  |   60 -
 src/ddoc_cache/src/ddoc_cache.hrl                  |   40 -
 src/ddoc_cache/src/ddoc_cache_app.erl              |   25 -
 src/ddoc_cache/src/ddoc_cache_entry.erl            |  374 ----
 src/ddoc_cache/src/ddoc_cache_entry_custom.erl     |   37 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     |   46 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl |   47 -
 .../src/ddoc_cache_entry_validation_funs.erl       |   44 -
 src/ddoc_cache/src/ddoc_cache_lru.erl              |  333 ----
 src/ddoc_cache/src/ddoc_cache_opener.erl           |   66 -
 src/ddoc_cache/src/ddoc_cache_sup.erl              |   46 -
 src/ddoc_cache/src/ddoc_cache_value.erl            |   27 -
 .../test/eunit/ddoc_cache_basic_test.erl           |  175 --
 .../test/eunit/ddoc_cache_coverage_test.erl        |   77 -
 .../test/eunit/ddoc_cache_disabled_test.erl        |   62 -
 .../test/eunit/ddoc_cache_entry_test.erl           |  159 --
 src/ddoc_cache/test/eunit/ddoc_cache_ev.erl        |   21 -
 .../test/eunit/ddoc_cache_eviction_test.erl        |   96 -
 src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl  |  219 --
 .../test/eunit/ddoc_cache_no_cache_test.erl        |   87 -
 .../test/eunit/ddoc_cache_open_error_test.erl      |   46 -
 src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl |  107 -
 .../test/eunit/ddoc_cache_opener_test.erl          |   33 -
 .../test/eunit/ddoc_cache_refresh_test.erl         |  174 --
 .../test/eunit/ddoc_cache_remove_test.erl          |  224 ---
 src/ddoc_cache/test/eunit/ddoc_cache_test.hrl      |   26 -
 src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl     |  111 --
 src/dreyfus/.gitignore                             |    4 -
 src/dreyfus/LICENSE.txt                            |  202 --
 src/dreyfus/README.md                              |   78 -
 src/dreyfus/include/dreyfus.hrl                    |   74 -
 src/dreyfus/priv/stats_descriptions.cfg            |   65 -
 src/dreyfus/src/clouseau_rpc.erl                   |  109 -
 src/dreyfus/src/dreyfus.app.src                    |   22 -
 src/dreyfus/src/dreyfus_app.erl                    |   24 -
 src/dreyfus/src/dreyfus_bookmark.erl               |   90 -
 src/dreyfus/src/dreyfus_config.erl                 |   15 -
 src/dreyfus/src/dreyfus_epi.erl                    |   46 -
 src/dreyfus/src/dreyfus_fabric.erl                 |  205 --
 src/dreyfus/src/dreyfus_fabric_cleanup.erl         |   78 -
 src/dreyfus/src/dreyfus_fabric_group1.erl          |  129 --
 src/dreyfus/src/dreyfus_fabric_group2.erl          |  158 --
 src/dreyfus/src/dreyfus_fabric_info.erl            |  108 -
 src/dreyfus/src/dreyfus_fabric_search.erl          |  270 ---
 src/dreyfus/src/dreyfus_httpd.erl                  |  614 ------
 src/dreyfus/src/dreyfus_httpd_handlers.erl         |   29 -
 src/dreyfus/src/dreyfus_index.erl                  |  391 ----
 src/dreyfus/src/dreyfus_index_manager.erl          |  153 --
 src/dreyfus/src/dreyfus_index_updater.erl          |  181 --
 src/dreyfus/src/dreyfus_plugin_couch_db.erl        |   26 -
 src/dreyfus/src/dreyfus_rpc.erl                    |  130 --
 src/dreyfus/src/dreyfus_sup.erl                    |   32 -
 src/dreyfus/src/dreyfus_util.erl                   |  441 -----
 src/dreyfus/test/dreyfus_blacklist_await_test.erl  |   76 -
 .../test/dreyfus_blacklist_request_test.erl        |   96 -
 src/dreyfus/test/dreyfus_config_test.erl           |   71 -
 src/dreyfus/test/dreyfus_purge_test.erl            |  867 --------
 src/dreyfus/test/dreyfus_test_util.erl             |   13 -
 src/dreyfus/test/elixir/mix.exs                    |   30 -
 src/dreyfus/test/elixir/mix.lock                   |    5 -
 src/dreyfus/test/elixir/run                        |    4 -
 .../test/elixir/test/partition_search_test.exs     |  247 ---
 src/dreyfus/test/elixir/test/search_test.exs       |  226 ---
 src/dreyfus/test/elixir/test/test_helper.exs       |    4 -
 src/fabric/include/fabric.hrl                      |   46 -
 src/fabric/src/fabric.app.src                      |    2 -
 src/fabric/src/fabric.erl                          |  720 -------
 src/fabric/src/fabric2_db.erl                      |   26 +-
 src/fabric/src/fabric2_util.erl                    |    2 +-
 src/fabric/src/fabric_db_create.erl                |  228 ---
 src/fabric/src/fabric_db_delete.erl                |   98 -
 src/fabric/src/fabric_db_doc_count.erl             |   62 -
 src/fabric/src/fabric_db_info.erl                  |  171 --
 src/fabric/src/fabric_db_meta.erl                  |  198 --
 src/fabric/src/fabric_db_partition_info.erl        |  155 --
 src/fabric/src/fabric_db_update_listener.erl       |  177 --
 src/fabric/src/fabric_design_doc_count.erl         |   62 -
 src/fabric/src/fabric_dict.erl                     |   61 -
 src/fabric/src/fabric_doc_attachments.erl          |  160 --
 src/fabric/src/fabric_doc_atts.erl                 |  170 --
 src/fabric/src/fabric_doc_missing_revs.erl         |   97 -
 src/fabric/src/fabric_doc_open.erl                 |  610 ------
 src/fabric/src/fabric_doc_open_revs.erl            |  799 --------
 src/fabric/src/fabric_doc_purge.erl                |  571 ------
 src/fabric/src/fabric_doc_update.erl               |  377 ----
 src/fabric/src/fabric_group_info.erl               |  139 --
 src/fabric/src/fabric_ring.erl                     |  519 -----
 src/fabric/src/fabric_rpc.erl                      |  664 -------
 src/fabric/src/fabric_streams.erl                  |  274 ---
 src/fabric/src/fabric_util.erl                     |  347 ----
 src/fabric/src/fabric_view.erl                     |  478 -----
 src/fabric/src/fabric_view_all_docs.erl            |  332 ----
 src/fabric/src/fabric_view_changes.erl             |  820 --------
 src/fabric/src/fabric_view_map.erl                 |  267 ---
 src/fabric/src/fabric_view_reduce.erl              |  165 --
 src/fabric/test/eunit/fabric_rpc_tests.erl         |  181 --
 src/fabric/test/fabric2_dir_prefix_tests.erl       |    4 +-
 src/fabric/test/fabric2_node_types_tests.erl       |    4 +-
 src/fabric/test/fabric2_tx_options_tests.erl       |    4 +-
 src/global_changes/.gitignore                      |    2 -
 src/global_changes/LICENSE                         |  203 --
 src/global_changes/README.md                       |   27 -
 src/global_changes/priv/stats_descriptions.cfg     |   20 -
 src/global_changes/src/global_changes.app.src      |   32 -
 src/global_changes/src/global_changes_app.erl      |   28 -
 src/global_changes/src/global_changes_epi.erl      |   51 -
 src/global_changes/src/global_changes_httpd.erl    |  285 ---
 .../src/global_changes_httpd_handlers.erl          |   28 -
 src/global_changes/src/global_changes_listener.erl |  165 --
 src/global_changes/src/global_changes_plugin.erl   |   40 -
 src/global_changes/src/global_changes_server.erl   |  229 ---
 src/global_changes/src/global_changes_sup.erl      |   84 -
 src/global_changes/src/global_changes_util.erl     |   27 -
 .../test/eunit/global_changes_hooks_tests.erl      |  156 --
 src/ioq/.gitignore                                 |    2 -
 src/ioq/src/ioq.app.src                            |   21 -
 src/ioq/src/ioq.erl                                |  189 --
 src/ioq/src/ioq_app.erl                            |   21 -
 src/ioq/src/ioq_sup.erl                            |   24 -
 src/ken/README.md                                  |   12 -
 src/ken/rebar.config.script                        |   28 -
 src/ken/src/ken.app.src.script                     |   38 -
 src/ken/src/ken.erl                                |   29 -
 src/ken/src/ken_app.erl                            |   28 -
 src/ken/src/ken_event_handler.erl                  |   56 -
 src/ken/src/ken_server.erl                         |  579 ------
 src/ken/src/ken_sup.erl                            |   33 -
 src/ken/test/config.ini                            |    2 -
 src/ken/test/ken_server_test.erl                   |   97 -
 src/mango/src/mango_cursor.erl                     |    8 -
 src/mango/src/mango_cursor_special.erl             |    2 +-
 src/mango/src/mango_cursor_text.erl                |  334 ----
 src/mango/src/mango_cursor_view.erl                |    4 +-
 src/mango/src/mango_idx.erl                        |   19 +-
 src/mango/src/mango_idx_text.erl                   |  459 -----
 src/mango/src/mango_json_bookmark.erl              |    2 +-
 src/mem3/LICENSE                                   |  202 --
 src/mem3/README.md                                 |   43 -
 src/mem3/README_reshard.md                         |   93 -
 src/mem3/include/mem3.hrl                          |   59 -
 src/mem3/priv/stats_descriptions.cfg               |   12 -
 src/mem3/rebar.config.script                       |   22 -
 src/mem3/src/mem3.app.src                          |   40 -
 src/mem3/src/mem3.erl                              |  424 ----
 src/mem3/src/mem3_app.erl                          |   21 -
 src/mem3/src/mem3_cluster.erl                      |  161 --
 src/mem3/src/mem3_epi.erl                          |   51 -
 src/mem3/src/mem3_hash.erl                         |   73 -
 src/mem3/src/mem3_httpd.erl                        |   84 -
 src/mem3/src/mem3_httpd_handlers.erl               |   61 -
 src/mem3/src/mem3_nodes.erl                        |  155 --
 src/mem3/src/mem3_plugin_couch_db.erl              |   21 -
 src/mem3/src/mem3_rep.erl                          |  998 ----------
 src/mem3/src/mem3_reshard.erl                      |  913 ---------
 src/mem3/src/mem3_reshard.hrl                      |   74 -
 src/mem3/src/mem3_reshard_api.erl                  |  217 --
 src/mem3/src/mem3_reshard_dbdoc.erl                |  274 ---
 src/mem3/src/mem3_reshard_httpd.erl                |  317 ---
 src/mem3/src/mem3_reshard_index.erl                |  164 --
 src/mem3/src/mem3_reshard_job.erl                  |  716 -------
 src/mem3/src/mem3_reshard_job_sup.erl              |   55 -
 src/mem3/src/mem3_reshard_store.erl                |  286 ---
 src/mem3/src/mem3_reshard_sup.erl                  |   47 -
 src/mem3/src/mem3_reshard_validate.erl             |  126 --
 src/mem3/src/mem3_rpc.erl                          |  711 -------
 src/mem3/src/mem3_seeds.erl                        |  162 --
 src/mem3/src/mem3_shards.erl                       |  766 -------
 src/mem3/src/mem3_sup.erl                          |   40 -
 src/mem3/src/mem3_sync.erl                         |  323 ---
 src/mem3/src/mem3_sync_event.erl                   |   86 -
 src/mem3/src/mem3_sync_event_listener.erl          |  353 ----
 src/mem3/src/mem3_sync_nodes.erl                   |  115 --
 src/mem3/src/mem3_sync_security.erl                |  117 --
 src/mem3/src/mem3_util.erl                         |  650 ------
 src/mem3/test/eunit/mem3_cluster_test.erl          |  133 --
 src/mem3/test/eunit/mem3_hash_test.erl             |   23 -
 src/mem3/test/eunit/mem3_rep_test.erl              |  321 ---
 src/mem3/test/eunit/mem3_reshard_api_test.erl      |  847 --------
 .../test/eunit/mem3_reshard_changes_feed_test.erl  |  389 ----
 src/mem3/test/eunit/mem3_reshard_test.erl          |  834 --------
 src/mem3/test/eunit/mem3_ring_prop_tests.erl       |  151 --
 src/mem3/test/eunit/mem3_seeds_test.erl            |   69 -
 src/mem3/test/eunit/mem3_sync_security_test.erl    |   54 -
 src/mem3/test/eunit/mem3_util_test.erl             |  130 --
 src/rexi/README.md                                 |   23 -
 src/rexi/include/rexi.hrl                          |   20 -
 src/rexi/priv/stats_descriptions.cfg               |   24 -
 src/rexi/rebar.config                              |    2 -
 src/rexi/src/rexi.app.src                          |   28 -
 src/rexi/src/rexi.erl                              |  320 ---
 src/rexi/src/rexi_app.erl                          |   22 -
 src/rexi/src/rexi_buffer.erl                       |  104 -
 src/rexi/src/rexi_monitor.erl                      |   65 -
 src/rexi/src/rexi_server.erl                       |  193 --
 src/rexi/src/rexi_server_mon.erl                   |  176 --
 src/rexi/src/rexi_server_sup.erl                   |   29 -
 src/rexi/src/rexi_sup.erl                          |   64 -
 src/rexi/src/rexi_utils.erl                        |  105 -
 src/setup/.gitignore                               |    4 -
 src/setup/LICENSE                                  |  203 --
 src/setup/README.md                                |  210 --
 src/setup/src/setup.app.src                        |   27 -
 src/setup/src/setup.erl                            |  386 ----
 src/setup/src/setup_app.erl                        |   28 -
 src/setup/src/setup_epi.erl                        |   49 -
 src/setup/src/setup_httpd.erl                      |  180 --
 src/setup/src/setup_httpd_handlers.erl             |   32 -
 src/setup/src/setup_sup.erl                        |   44 -
 src/setup/test/t-frontend-setup.sh                 |   71 -
 src/setup/test/t-single-node-auto-setup.sh         |   24 -
 src/setup/test/t-single-node.sh                    |   46 -
 src/setup/test/t.sh                                |   63 -
 src/smoosh/README.md                               |  140 --
 src/smoosh/operator_guide.md                       |  396 ----
 src/smoosh/src/smoosh.app.src                      |   29 -
 src/smoosh/src/smoosh.erl                          |   69 -
 src/smoosh/src/smoosh_app.erl                      |   28 -
 src/smoosh/src/smoosh_channel.erl                  |  325 ---
 src/smoosh/src/smoosh_priority_queue.erl           |   86 -
 src/smoosh/src/smoosh_server.erl                   |  606 ------
 src/smoosh/src/smoosh_sup.erl                      |   38 -
 src/smoosh/src/smoosh_utils.erl                    |   92 -
 src/smoosh/test/exunit/scheduling_window_test.exs  |   79 -
 src/smoosh/test/exunit/test_helper.exs             |    2 -
 test/elixir/lib/step/create_db.ex                  |    2 +-
 448 files changed, 962 insertions(+), 77692 deletions(-)
 delete mode 100644 src/chttpd/src/chttpd_rewrite.erl
 create mode 100644 src/chttpd/src/chttpd_util.erl
 delete mode 100644 src/couch/src/couch_bt_engine.erl
 delete mode 100644 src/couch/src/couch_bt_engine.hrl
 delete mode 100644 src/couch/src/couch_bt_engine_compactor.erl
 delete mode 100644 src/couch/src/couch_bt_engine_header.erl
 delete mode 100644 src/couch/src/couch_bt_engine_stream.erl
 delete mode 100644 src/couch/src/couch_btree.erl
 delete mode 100644 src/couch/src/couch_changes.erl
 delete mode 100644 src/couch/src/couch_compress.erl
 delete mode 100644 src/couch/src/couch_db.erl
 delete mode 100644 src/couch/src/couch_db_engine.erl
 delete mode 100644 src/couch/src/couch_db_header.erl
 delete mode 100644 src/couch/src/couch_db_int.hrl
 delete mode 100644 src/couch/src/couch_db_plugin.erl
 delete mode 100644 src/couch/src/couch_db_split.erl
 delete mode 100644 src/couch/src/couch_db_updater.erl
 delete mode 100644 src/couch/src/couch_emsort.erl
 delete mode 100644 src/couch/src/couch_event_sup.erl
 delete mode 100644 src/couch/src/couch_file.erl
 delete mode 100644 src/couch/src/couch_httpd_db.erl
 delete mode 100644 src/couch/src/couch_httpd_misc_handlers.erl
 delete mode 100644 src/couch/src/couch_httpd_rewrite.erl
 delete mode 100644 src/couch/src/couch_lru.erl
 delete mode 100644 src/couch/src/couch_multidb_changes.erl
 delete mode 100644 src/couch/src/couch_server_int.hrl
 delete mode 100644 src/couch/src/couch_stream.erl
 delete mode 100644 src/couch/src/couch_task_status.erl
 delete mode 100644 src/couch/src/couch_users_db.erl
 delete mode 100644 src/couch/test/eunit/couch_auth_cache_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_bt_engine_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_btree_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_changes_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_db_doc_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_db_plugin_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_db_props_upgrade_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_db_split_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_db_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_file_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_index_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_server_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_stream_tests.erl
 delete mode 100644 src/couch/test/eunit/couch_task_status_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_attachments_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_db_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_design_doc_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_file_compression_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_location_header_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_mrview_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_update_conflicts_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_vhosts_tests.erl
 delete mode 100644 src/couch/test/eunit/couchdb_views_tests.erl
 delete mode 100644 src/couch/test/eunit/global_changes_tests.erl
 delete mode 100644 src/couch/test/exunit/couch_compress_tests.exs
 delete mode 100644 src/couch/test/exunit/fabric_test.exs
 delete mode 100644 src/couch_event/.gitignore
 delete mode 100644 src/couch_event/LICENSE
 delete mode 100644 src/couch_event/README.md
 delete mode 100644 src/couch_event/rebar.config
 delete mode 100644 src/couch_event/src/couch_event.app.src
 delete mode 100644 src/couch_event/src/couch_event.erl
 delete mode 100644 src/couch_event/src/couch_event_app.erl
 delete mode 100644 src/couch_event/src/couch_event_int.hrl
 delete mode 100644 src/couch_event/src/couch_event_listener.erl
 delete mode 100644 src/couch_event/src/couch_event_listener_mfa.erl
 delete mode 100644 src/couch_event/src/couch_event_os_listener.erl
 delete mode 100644 src/couch_event/src/couch_event_server.erl
 delete mode 100644 src/couch_event/src/couch_event_sup2.erl
 delete mode 100644 src/couch_index/.gitignore
 delete mode 100644 src/couch_index/LICENSE
 delete mode 100644 src/couch_index/rebar.config
 delete mode 100644 src/couch_index/src/couch_index.app.src
 delete mode 100644 src/couch_index/src/couch_index.erl
 delete mode 100644 src/couch_index/src/couch_index_app.erl
 delete mode 100644 src/couch_index/src/couch_index_compactor.erl
 delete mode 100644 src/couch_index/src/couch_index_epi.erl
 delete mode 100644 src/couch_index/src/couch_index_plugin.erl
 delete mode 100644 src/couch_index/src/couch_index_plugin_couch_db.erl
 delete mode 100644 src/couch_index/src/couch_index_server.erl
 delete mode 100644 src/couch_index/src/couch_index_sup.erl
 delete mode 100644 src/couch_index/src/couch_index_updater.erl
 delete mode 100644 src/couch_index/src/couch_index_util.erl
 delete mode 100644 src/couch_index/test/eunit/couch_index_compaction_tests.erl
 delete mode 100644 src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
 delete mode 100644 src/couch_mrview/LICENSE
 delete mode 100644 src/couch_mrview/include/couch_mrview.hrl
 delete mode 100644 src/couch_mrview/priv/stats_descriptions.cfg
 delete mode 100644 src/couch_mrview/rebar.config
 delete mode 100644 src/couch_mrview/src/couch_mrview.app.src
 delete mode 100644 src/couch_mrview/src/couch_mrview.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_cleanup.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_compactor.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_index.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_show.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_test_util.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_update_notifier.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_updater.erl
 delete mode 100644 src/couch_mrview/src/couch_mrview_util.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
 delete mode 100644 src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
 delete mode 100644 src/couch_peruser/.gitignore
 delete mode 100644 src/couch_peruser/LICENSE
 delete mode 100644 src/couch_peruser/README.md
 delete mode 100644 src/couch_peruser/src/couch_peruser.app.src
 delete mode 100644 src/couch_peruser/src/couch_peruser.erl
 delete mode 100644 src/couch_peruser/src/couch_peruser_app.erl
 delete mode 100644 src/couch_peruser/src/couch_peruser_sup.erl
 delete mode 100644 src/couch_peruser/test/eunit/couch_peruser_test.erl
 delete mode 100644 src/couch_plugins/LICENSE
 delete mode 100644 src/couch_plugins/Makefile.am
 delete mode 100644 src/couch_plugins/README.md
 delete mode 100644 src/couch_plugins/src/couch_plugins.app.src
 delete mode 100644 src/couch_plugins/src/couch_plugins.erl
 delete mode 100644 src/couch_plugins/src/couch_plugins_httpd.erl
 delete mode 100644 src/couch_pse_tests/src/couch_pse_tests.app.src
 delete mode 100644 src/couch_pse_tests/src/cpse_gather.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_attachments.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_compaction.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_fold_changes.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_fold_docs.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_get_set_props.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_open_close_delete.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_purge_docs.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_purge_replication.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_purge_seqs.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_read_write_docs.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_test_ref_counting.erl
 delete mode 100644 src/couch_pse_tests/src/cpse_util.erl
 rename src/{couch_mrview/src/couch_mrview_http.erl => couch_views/src/couch_views_http_util.erl} (52%)
 create mode 100644 src/couch_views/src/couch_views_validate.erl
 delete mode 100644 src/ddoc_cache/LICENSE
 delete mode 100644 src/ddoc_cache/README.md
 delete mode 100644 src/ddoc_cache/priv/stats_descriptions.cfg
 delete mode 100644 src/ddoc_cache/src/ddoc_cache.app.src
 delete mode 100644 src/ddoc_cache/src/ddoc_cache.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache.hrl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_app.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_entry.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_entry_custom.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_lru.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_opener.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_sup.erl
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_value.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_test.hrl
 delete mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
 delete mode 100644 src/dreyfus/.gitignore
 delete mode 100644 src/dreyfus/LICENSE.txt
 delete mode 100644 src/dreyfus/README.md
 delete mode 100644 src/dreyfus/include/dreyfus.hrl
 delete mode 100644 src/dreyfus/priv/stats_descriptions.cfg
 delete mode 100644 src/dreyfus/src/clouseau_rpc.erl
 delete mode 100644 src/dreyfus/src/dreyfus.app.src
 delete mode 100644 src/dreyfus/src/dreyfus_app.erl
 delete mode 100644 src/dreyfus/src/dreyfus_bookmark.erl
 delete mode 100644 src/dreyfus/src/dreyfus_config.erl
 delete mode 100644 src/dreyfus/src/dreyfus_epi.erl
 delete mode 100644 src/dreyfus/src/dreyfus_fabric.erl
 delete mode 100644 src/dreyfus/src/dreyfus_fabric_cleanup.erl
 delete mode 100644 src/dreyfus/src/dreyfus_fabric_group1.erl
 delete mode 100644 src/dreyfus/src/dreyfus_fabric_group2.erl
 delete mode 100644 src/dreyfus/src/dreyfus_fabric_info.erl
 delete mode 100644 src/dreyfus/src/dreyfus_fabric_search.erl
 delete mode 100644 src/dreyfus/src/dreyfus_httpd.erl
 delete mode 100644 src/dreyfus/src/dreyfus_httpd_handlers.erl
 delete mode 100644 src/dreyfus/src/dreyfus_index.erl
 delete mode 100644 src/dreyfus/src/dreyfus_index_manager.erl
 delete mode 100644 src/dreyfus/src/dreyfus_index_updater.erl
 delete mode 100644 src/dreyfus/src/dreyfus_plugin_couch_db.erl
 delete mode 100644 src/dreyfus/src/dreyfus_rpc.erl
 delete mode 100644 src/dreyfus/src/dreyfus_sup.erl
 delete mode 100644 src/dreyfus/src/dreyfus_util.erl
 delete mode 100644 src/dreyfus/test/dreyfus_blacklist_await_test.erl
 delete mode 100644 src/dreyfus/test/dreyfus_blacklist_request_test.erl
 delete mode 100644 src/dreyfus/test/dreyfus_config_test.erl
 delete mode 100644 src/dreyfus/test/dreyfus_purge_test.erl
 delete mode 100644 src/dreyfus/test/dreyfus_test_util.erl
 delete mode 100644 src/dreyfus/test/elixir/mix.exs
 delete mode 100644 src/dreyfus/test/elixir/mix.lock
 delete mode 100755 src/dreyfus/test/elixir/run
 delete mode 100644 src/dreyfus/test/elixir/test/partition_search_test.exs
 delete mode 100644 src/dreyfus/test/elixir/test/search_test.exs
 delete mode 100644 src/dreyfus/test/elixir/test/test_helper.exs
 delete mode 100644 src/fabric/include/fabric.hrl
 delete mode 100644 src/fabric/src/fabric.erl
 delete mode 100644 src/fabric/src/fabric_db_create.erl
 delete mode 100644 src/fabric/src/fabric_db_delete.erl
 delete mode 100644 src/fabric/src/fabric_db_doc_count.erl
 delete mode 100644 src/fabric/src/fabric_db_info.erl
 delete mode 100644 src/fabric/src/fabric_db_meta.erl
 delete mode 100644 src/fabric/src/fabric_db_partition_info.erl
 delete mode 100644 src/fabric/src/fabric_db_update_listener.erl
 delete mode 100644 src/fabric/src/fabric_design_doc_count.erl
 delete mode 100644 src/fabric/src/fabric_dict.erl
 delete mode 100644 src/fabric/src/fabric_doc_attachments.erl
 delete mode 100644 src/fabric/src/fabric_doc_atts.erl
 delete mode 100644 src/fabric/src/fabric_doc_missing_revs.erl
 delete mode 100644 src/fabric/src/fabric_doc_open.erl
 delete mode 100644 src/fabric/src/fabric_doc_open_revs.erl
 delete mode 100644 src/fabric/src/fabric_doc_purge.erl
 delete mode 100644 src/fabric/src/fabric_doc_update.erl
 delete mode 100644 src/fabric/src/fabric_group_info.erl
 delete mode 100644 src/fabric/src/fabric_ring.erl
 delete mode 100644 src/fabric/src/fabric_rpc.erl
 delete mode 100644 src/fabric/src/fabric_streams.erl
 delete mode 100644 src/fabric/src/fabric_util.erl
 delete mode 100644 src/fabric/src/fabric_view.erl
 delete mode 100644 src/fabric/src/fabric_view_all_docs.erl
 delete mode 100644 src/fabric/src/fabric_view_changes.erl
 delete mode 100644 src/fabric/src/fabric_view_map.erl
 delete mode 100644 src/fabric/src/fabric_view_reduce.erl
 delete mode 100644 src/fabric/test/eunit/fabric_rpc_tests.erl
 delete mode 100644 src/global_changes/.gitignore
 delete mode 100644 src/global_changes/LICENSE
 delete mode 100644 src/global_changes/README.md
 delete mode 100644 src/global_changes/priv/stats_descriptions.cfg
 delete mode 100644 src/global_changes/src/global_changes.app.src
 delete mode 100644 src/global_changes/src/global_changes_app.erl
 delete mode 100644 src/global_changes/src/global_changes_epi.erl
 delete mode 100644 src/global_changes/src/global_changes_httpd.erl
 delete mode 100644 src/global_changes/src/global_changes_httpd_handlers.erl
 delete mode 100644 src/global_changes/src/global_changes_listener.erl
 delete mode 100644 src/global_changes/src/global_changes_plugin.erl
 delete mode 100644 src/global_changes/src/global_changes_server.erl
 delete mode 100644 src/global_changes/src/global_changes_sup.erl
 delete mode 100644 src/global_changes/src/global_changes_util.erl
 delete mode 100644 src/global_changes/test/eunit/global_changes_hooks_tests.erl
 delete mode 100644 src/ioq/.gitignore
 delete mode 100644 src/ioq/src/ioq.app.src
 delete mode 100644 src/ioq/src/ioq.erl
 delete mode 100644 src/ioq/src/ioq_app.erl
 delete mode 100644 src/ioq/src/ioq_sup.erl
 delete mode 100644 src/ken/README.md
 delete mode 100644 src/ken/rebar.config.script
 delete mode 100644 src/ken/src/ken.app.src.script
 delete mode 100644 src/ken/src/ken.erl
 delete mode 100644 src/ken/src/ken_app.erl
 delete mode 100644 src/ken/src/ken_event_handler.erl
 delete mode 100644 src/ken/src/ken_server.erl
 delete mode 100644 src/ken/src/ken_sup.erl
 delete mode 100644 src/ken/test/config.ini
 delete mode 100644 src/ken/test/ken_server_test.erl
 delete mode 100644 src/mango/src/mango_cursor_text.erl
 delete mode 100644 src/mango/src/mango_idx_text.erl
 delete mode 100644 src/mem3/LICENSE
 delete mode 100644 src/mem3/README.md
 delete mode 100644 src/mem3/README_reshard.md
 delete mode 100644 src/mem3/include/mem3.hrl
 delete mode 100644 src/mem3/priv/stats_descriptions.cfg
 delete mode 100644 src/mem3/rebar.config.script
 delete mode 100644 src/mem3/src/mem3.app.src
 delete mode 100644 src/mem3/src/mem3.erl
 delete mode 100644 src/mem3/src/mem3_app.erl
 delete mode 100644 src/mem3/src/mem3_cluster.erl
 delete mode 100644 src/mem3/src/mem3_epi.erl
 delete mode 100644 src/mem3/src/mem3_hash.erl
 delete mode 100644 src/mem3/src/mem3_httpd.erl
 delete mode 100644 src/mem3/src/mem3_httpd_handlers.erl
 delete mode 100644 src/mem3/src/mem3_nodes.erl
 delete mode 100644 src/mem3/src/mem3_plugin_couch_db.erl
 delete mode 100644 src/mem3/src/mem3_rep.erl
 delete mode 100644 src/mem3/src/mem3_reshard.erl
 delete mode 100644 src/mem3/src/mem3_reshard.hrl
 delete mode 100644 src/mem3/src/mem3_reshard_api.erl
 delete mode 100644 src/mem3/src/mem3_reshard_dbdoc.erl
 delete mode 100644 src/mem3/src/mem3_reshard_httpd.erl
 delete mode 100644 src/mem3/src/mem3_reshard_index.erl
 delete mode 100644 src/mem3/src/mem3_reshard_job.erl
 delete mode 100644 src/mem3/src/mem3_reshard_job_sup.erl
 delete mode 100644 src/mem3/src/mem3_reshard_store.erl
 delete mode 100644 src/mem3/src/mem3_reshard_sup.erl
 delete mode 100644 src/mem3/src/mem3_reshard_validate.erl
 delete mode 100644 src/mem3/src/mem3_rpc.erl
 delete mode 100644 src/mem3/src/mem3_seeds.erl
 delete mode 100644 src/mem3/src/mem3_shards.erl
 delete mode 100644 src/mem3/src/mem3_sup.erl
 delete mode 100644 src/mem3/src/mem3_sync.erl
 delete mode 100644 src/mem3/src/mem3_sync_event.erl
 delete mode 100644 src/mem3/src/mem3_sync_event_listener.erl
 delete mode 100644 src/mem3/src/mem3_sync_nodes.erl
 delete mode 100644 src/mem3/src/mem3_sync_security.erl
 delete mode 100644 src/mem3/src/mem3_util.erl
 delete mode 100644 src/mem3/test/eunit/mem3_cluster_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_hash_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_rep_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_reshard_api_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_reshard_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_ring_prop_tests.erl
 delete mode 100644 src/mem3/test/eunit/mem3_seeds_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_sync_security_test.erl
 delete mode 100644 src/mem3/test/eunit/mem3_util_test.erl
 delete mode 100644 src/rexi/README.md
 delete mode 100644 src/rexi/include/rexi.hrl
 delete mode 100644 src/rexi/priv/stats_descriptions.cfg
 delete mode 100644 src/rexi/rebar.config
 delete mode 100644 src/rexi/src/rexi.app.src
 delete mode 100644 src/rexi/src/rexi.erl
 delete mode 100644 src/rexi/src/rexi_app.erl
 delete mode 100644 src/rexi/src/rexi_buffer.erl
 delete mode 100644 src/rexi/src/rexi_monitor.erl
 delete mode 100644 src/rexi/src/rexi_server.erl
 delete mode 100644 src/rexi/src/rexi_server_mon.erl
 delete mode 100644 src/rexi/src/rexi_server_sup.erl
 delete mode 100644 src/rexi/src/rexi_sup.erl
 delete mode 100644 src/rexi/src/rexi_utils.erl
 delete mode 100644 src/setup/.gitignore
 delete mode 100644 src/setup/LICENSE
 delete mode 100644 src/setup/README.md
 delete mode 100644 src/setup/src/setup.app.src
 delete mode 100644 src/setup/src/setup.erl
 delete mode 100644 src/setup/src/setup_app.erl
 delete mode 100644 src/setup/src/setup_epi.erl
 delete mode 100644 src/setup/src/setup_httpd.erl
 delete mode 100644 src/setup/src/setup_httpd_handlers.erl
 delete mode 100644 src/setup/src/setup_sup.erl
 delete mode 100755 src/setup/test/t-frontend-setup.sh
 delete mode 100755 src/setup/test/t-single-node-auto-setup.sh
 delete mode 100755 src/setup/test/t-single-node.sh
 delete mode 100755 src/setup/test/t.sh
 delete mode 100644 src/smoosh/README.md
 delete mode 100644 src/smoosh/operator_guide.md
 delete mode 100644 src/smoosh/src/smoosh.app.src
 delete mode 100644 src/smoosh/src/smoosh.erl
 delete mode 100644 src/smoosh/src/smoosh_app.erl
 delete mode 100644 src/smoosh/src/smoosh_channel.erl
 delete mode 100644 src/smoosh/src/smoosh_priority_queue.erl
 delete mode 100644 src/smoosh/src/smoosh_server.erl
 delete mode 100644 src/smoosh/src/smoosh_sup.erl
 delete mode 100644 src/smoosh/src/smoosh_utils.erl
 delete mode 100644 src/smoosh/test/exunit/scheduling_window_test.exs
 delete mode 100644 src/smoosh/test/exunit/test_helper.exs

[couchdb] 03/24: Close backend port and clean up url handlers

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e05a6bfc03e100e998de89cd99c8c9e0000d7acf
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Tue Apr 13 23:27:10 2021 -0400

    Close backend port and clean up url handlers
    
    The backend (5986) port is closed. Requests to `/_node/_local/_config` and
    a few other `_*` endpoints continue to work. Backend db access now
    returns error code 410 (not_supported). Previously, it accessed
    couch_server and couch_file to create 3.x-style local dbs.
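    
    As an illustration of the new behavior (a hypothetical sketch, not code
    from this commit; the function name is made up), a clause rejecting
    backend-style db access could look like:
    
        %% Hypothetical: answer 3.x-style backend db requests with 410 Gone.
        handle_backend_db_req(Req, _DbName) ->
            chttpd:send_error(Req, 410, <<"gone">>,
                <<"Node-local database access is not supported">>).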
    
    URL handlers are updated to return `not_supported` for features that are
    not coming back, and `not_implemented` for features that haven't been
    implemented yet.
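    
    A minimal sketch of that mapping idea (the endpoint choices and handler
    names here are illustrative, not verbatim from the commit):
    
        %% Illustrative url handler table:
        url_handler(<<"_restart">>) -> fun not_supported/1;   % not coming back
        url_handler(<<"_reshard">>) -> fun not_implemented/1; % not ported yet
        url_handler(_Other)         -> no_match.
    
        not_supported(Req) ->
            chttpd:send_error(Req, 410, <<"gone">>,
                <<"this feature is not supported">>).
    
        not_implemented(Req) ->
            chttpd:send_error(Req, 501, <<"not_implemented">>,
                <<"this feature is not yet implemented">>).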
    
    `parse_copy_destination_header/1` is the only function from `couch_httpd_db`
    that's still needed, so it was moved to the `chttpd_util` module.
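    
    For context, that function parses a COPY request's `Destination` header
    (e.g. `target-doc?rev=1-abc`) into a doc id and revision. A simplified
    sketch, not the exact moved code:
    
        parse_copy_destination_header(Req) ->
            case chttpd:header_value(Req, "Destination") of
                undefined ->
                    throw({bad_request, "Destination header is mandatory for COPY."});
                Destination ->
                    case string:split(Destination, "?rev=") of
                        [DocId] ->
                            {list_to_binary(DocId), {0, []}};
                        [DocId, Rev] ->
                            {Pos, RevId} = couch_doc:parse_rev(Rev),
                            {list_to_binary(DocId), {Pos, [RevId]}}
                    end
            end.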
    
    `couch_httpd_db` also handled `/_uuid` requests; that handler was moved to
    the `chttpd_misc` module.
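    
    That handler is essentially the classic uuids endpoint. A simplified
    sketch of its shape (assuming the usual `count` query parameter; record
    includes omitted):
    
        handle_uuids_req(#httpd{method = 'GET'} = Req) ->
            Count = list_to_integer(chttpd:qs_value(Req, "count", "1")),
            UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
            chttpd:send_json(Req, {[{<<"uuids">>, UUIDs}]});
        handle_uuids_req(Req) ->
            chttpd:send_method_not_allowed(Req, "GET").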
    
    "Welcome" endpoint (/) was updated to not call `clouseau_rpc:connected/0`.
    
    Request handling in `couch_httpd` was removed, so the file now consists
    mostly of utility functions.
---
 src/chttpd/src/chttpd.erl                         |   7 +-
 src/chttpd/src/chttpd_db.erl                      | 146 +--------
 src/chttpd/src/chttpd_httpd_handlers.erl          |  25 +-
 src/chttpd/src/chttpd_misc.erl                    |  37 ++-
 src/chttpd/src/chttpd_node.erl                    |  46 +--
 src/chttpd/src/chttpd_show.erl                    | 150 +---------
 src/chttpd/src/chttpd_util.erl                    |  41 +++
 src/couch/src/couch.app.src                       |  34 ---
 src/couch/src/couch_httpd.erl                     | 347 +---------------------
 src/couch/src/couch_secondary_sup.erl             |  10 +-
 src/couch/test/eunit/chttpd_endpoints_tests.erl   |  18 +-
 src/couch_replicator/src/couch_replicator_ids.erl |   2 +-
 12 files changed, 112 insertions(+), 751 deletions(-)

diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index b124375..8567ada 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -123,6 +123,12 @@ start_link(Name, Options) ->
          end,
     ok = couch_httpd:validate_bind_address(IP),
 
+    % Ensure uuid is set so that concurrent replications
+    % get the same value. This used to be in the backend (:5986) httpd
+    % start_link and was moved here for now. Ideally this should be set
+    % in FDB or coordinated across all the nodes
+    couch_server:get_uuid(),
+
     set_auth_handlers(),
 
     Options1 = Options ++ [
@@ -153,7 +159,6 @@ stop() ->
     mochiweb_http:stop(?MODULE).
 
 handle_request(MochiReq0) ->
-    erlang:put(?REWRITE_COUNT, 0),
     MochiReq = couch_httpd_vhost:dispatch_host(MochiReq0),
     handle_request_int(MochiReq).
 
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index ac3d3b1..8b99059 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -23,8 +23,7 @@
     db_req/2, couch_doc_open/4,handle_changes_req/2,
     update_doc_result_to_json/1, update_doc_result_to_json/2,
     handle_design_info_req/3, handle_view_cleanup_req/2,
-    update_doc/4, http_code_from_status/1,
-    handle_partition_req/2]).
+    update_doc/4, http_code_from_status/1]).
 
 -import(chttpd,
     [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
@@ -275,80 +274,6 @@ handle_view_cleanup_req(Req, Db) ->
     ok = fabric2_index:cleanup(Db),
     send_json(Req, 202, {[{ok, true}]}).
 
-
-handle_partition_req(#httpd{path_parts=[_,_]}=_Req, _Db) ->
-    throw({bad_request, invalid_partition_req});
-
-handle_partition_req(#httpd{method='GET', path_parts=[_,_,PartId]}=Req, Db) ->
-    couch_partition:validate_partition(PartId),
-    case couch_db:is_partitioned(Db) of
-        true ->
-            {ok, PartitionInfo} = fabric:get_partition_info(Db, PartId),
-            send_json(Req, {PartitionInfo});
-        false ->
-            throw({bad_request, <<"database is not partitioned">>})
-    end;
-
-handle_partition_req(#httpd{method='POST',
-    path_parts=[_, <<"_partition">>, <<"_", _/binary>>]}, _Db) ->
-    Msg = <<"Partition must not start with an underscore">>,
-    throw({illegal_partition, Msg});
-
-handle_partition_req(#httpd{path_parts = [_, _, _]}=Req, _Db) ->
-    send_method_not_allowed(Req, "GET");
-
-handle_partition_req(#httpd{path_parts=[DbName, _, PartId | Rest]}=Req, Db) ->
-    case couch_db:is_partitioned(Db) of
-        true ->
-            couch_partition:validate_partition(PartId),
-            QS = chttpd:qs(Req),
-            PartIdStr = ?b2l(PartId),
-            QSPartIdStr = couch_util:get_value("partition", QS, PartIdStr),
-            if QSPartIdStr == PartIdStr -> ok; true ->
-                Msg = <<"Conflicting value for `partition` in query string">>,
-                throw({bad_request, Msg})
-            end,
-            NewQS = lists:ukeysort(1, [{"partition", PartIdStr} | QS]),
-            NewReq = Req#httpd{
-                path_parts = [DbName | Rest],
-                qs = NewQS
-            },
-            update_partition_stats(Rest),
-            case Rest of
-                [OP | _] when OP == <<"_all_docs">> orelse ?IS_MANGO(OP) ->
-                    case chttpd_handlers:db_handler(OP, fun db_req/2) of
-                        Handler when is_function(Handler, 2) ->
-                            Handler(NewReq, Db);
-                        _ ->
-                            chttpd:send_error(Req, not_found)
-                    end;
-                [<<"_design">>, _Name, <<"_", _/binary>> | _] ->
-                    handle_design_req(NewReq, Db);
-                _ ->
-                    chttpd:send_error(Req, not_found)
-            end;
-        false ->
-            throw({bad_request, <<"database is not partitioned">>})
-    end;
-
-handle_partition_req(Req, _Db) ->
-    chttpd:send_error(Req, not_found).
-
-update_partition_stats(PathParts) ->
-    case PathParts of
-            [<<"_design">> | _] ->
-                couch_stats:increment_counter([couchdb, httpd, partition_view_requests]);
-            [<<"_all_docs">> | _] ->
-                couch_stats:increment_counter([couchdb, httpd, partition_all_docs_requests]);
-            [<<"_find">> | _] ->
-                couch_stats:increment_counter([couchdb, httpd, partition_find_requests]);
-            [<<"_explain">> | _] ->
-                couch_stats:increment_counter([couchdb, httpd, partition_explain_requests]);
-            _ ->
-                ok % ignore path that do not match
-        end.
-
-
 handle_design_req(#httpd{
         path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
     }=Req, Db) ->
@@ -635,41 +560,6 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
 db_req(#httpd{path_parts=[_, <<"_bulk_get">>]}=Req, _Db) ->
     send_method_not_allowed(Req, "POST");
 
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
-    couch_stats:increment_counter([couchdb, httpd, purge_requests]),
-    chttpd:validate_ctype(Req, "application/json"),
-    {IdsRevs} = chttpd:json_body_obj(Req),
-    IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
-    MaxIds = config:get_integer("purge", "max_document_id_number", 100),
-    case length(IdsRevs2) =< MaxIds of
-        false -> throw({bad_request, "Exceeded maximum number of documents."});
-        true -> ok
-    end,
-    RevsLen = lists:foldl(fun({_Id, Revs}, Acc) ->
-        length(Revs) + Acc
-    end, 0, IdsRevs2),
-    MaxRevs = config:get_integer("purge", "max_revisions_number", 1000),
-    case RevsLen =< MaxRevs of
-        false -> throw({bad_request, "Exceeded maximum number of revisions."});
-        true -> ok
-    end,
-    couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)),
-    Results2 = case fabric:purge_docs(Db, IdsRevs2, []) of
-        {ok, Results} ->
-            chttpd_stats:incr_writes(length(Results)),
-            Results;
-        {accepted, Results} ->
-            chttpd_stats:incr_writes(length(Results)),
-            Results
-    end,
-    {Code, Json} = purge_results_to_json(IdsRevs2, Results2),
-    send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]});
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-
 db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
     case chttpd:qs_json_value(Req, "keys", nil) of
     Keys when is_list(Keys) ->
@@ -778,22 +668,6 @@ db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
 db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
     send_method_not_allowed(Req, "PUT,GET");
 
-db_req(#httpd{method='PUT',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
-    case chttpd:json_body(Req) of
-        Limit when is_integer(Limit), Limit > 0 ->
-            case fabric:set_purge_infos_limit(Db, Limit, []) of
-                ok ->
-                    send_json(Req, {[{<<"ok">>, true}]});
-                Error ->
-                    throw(Error)
-            end;
-        _->
-            throw({bad_request, "`purge_infos_limit` must be positive integer"})
-    end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
-    send_json(Req, fabric:get_purge_infos_limit(Db));
-
 % Special case to enable using an unencoded slash in the URL of design docs,
 % as slashes in document IDs must otherwise be URL encoded.
 db_req(#httpd{method='GET', mochi_req=MochiReq, path_parts=[_DbName, <<"_design/", _/binary>> | _]}=Req, _Db) ->
@@ -1444,24 +1318,6 @@ update_doc_result_to_json(DocId, Error) ->
     {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
     {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
 
-purge_results_to_json([], []) ->
-    {201, []};
-purge_results_to_json([{DocId, _Revs} | RIn], [{ok, PRevs} | ROut]) ->
-    {Code, Results} = purge_results_to_json(RIn, ROut),
-    couch_stats:increment_counter([couchdb, document_purges, success]),
-    {Code, [{DocId, couch_doc:revs_to_strs(PRevs)} | Results]};
-purge_results_to_json([{DocId, _Revs} | RIn], [{accepted, PRevs} | ROut]) ->
-    {Code, Results} = purge_results_to_json(RIn, ROut),
-    couch_stats:increment_counter([couchdb, document_purges, success]),
-    NewResults = [{DocId, couch_doc:revs_to_strs(PRevs)} | Results],
-    {erlang:max(Code, 202), NewResults};
-purge_results_to_json([{DocId, _Revs} | RIn], [Error | ROut]) ->
-    {Code, Results} = purge_results_to_json(RIn, ROut),
-    {NewCode, ErrorStr, Reason} = chttpd:error_info(Error),
-    couch_stats:increment_counter([couchdb, document_purges, failure]),
-    NewResults = [{DocId, {[{error, ErrorStr}, {reason, Reason}]}} | Results],
-    {erlang:max(NewCode, Code), NewResults}.
-
 send_updated_doc(Req, Db, DocId, Json) ->
     send_updated_doc(Req, Db, DocId, Json, []).
 
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index d501159..e5374b1 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -15,9 +15,11 @@
 -export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
 
 -export([
-    not_supported/2,
     not_supported/3,
-    not_implemented/2
+    not_supported/2,
+    not_supported/1,
+    not_implemented/2,
+    not_implemented/1
 ]).
 
 
@@ -38,16 +40,22 @@ url_handler(<<"_replicate">>)      -> fun chttpd_misc:handle_replicate_req/1;
 url_handler(<<"_uuids">>)          -> fun chttpd_misc:handle_uuids_req/1;
 url_handler(<<"_session">>)        -> fun chttpd_auth:handle_session_req/1;
 url_handler(<<"_up">>)             -> fun chttpd_misc:handle_up_req/1;
+url_handler(<<"_membership">>)     -> fun ?MODULE:not_supported/1;
+url_handler(<<"_reshard">>)        -> fun ?MODULE:not_supported/1;
+url_handler(<<"_db_updates">>)     -> fun ?MODULE:not_implemented/1;
+url_handler(<<"_cluster_setup">>)  -> fun ?MODULE:not_implemented/1;
 url_handler(_) -> no_match.
 
 db_handler(<<"_view_cleanup">>) -> fun chttpd_db:handle_view_cleanup_req/2;
 db_handler(<<"_compact">>)      -> fun chttpd_db:handle_compact_req/2;
 db_handler(<<"_design">>)       -> fun chttpd_db:handle_design_req/2;
-db_handler(<<"_partition">>)    -> fun chttpd_db:handle_partition_req/2;
+db_handler(<<"_partition">>)    -> fun ?MODULE:not_implemented/2;
 db_handler(<<"_temp_view">>)    -> fun ?MODULE:not_supported/2;
 db_handler(<<"_changes">>)      -> fun chttpd_db:handle_changes_req/2;
 db_handler(<<"_purge">>)        -> fun ?MODULE:not_implemented/2;
 db_handler(<<"_purged_infos_limit">>) -> fun ?MODULE:not_implemented/2;
+db_handler(<<"_shards">>)       -> fun ?MODULE:not_supported/2;
+db_handler(<<"_sync_shards">>)  -> fun ?MODULE:not_supported/2;
 db_handler(_) -> no_match.
 
 design_handler(<<"_view">>)    -> fun chttpd_view:handle_view_req/3;
@@ -186,7 +194,6 @@ handler_info(Method, [<<"_", _/binary>> = Part| Rest], Req) ->
     % on for known system databases.
     DbName = case Part of
         <<"_dbs">> -> '_dbs';
-        <<"_global_changes">> -> '_global_changes';
         <<"_metadata">> -> '_metadata';
         <<"_nodes">> -> '_nodes';
         <<"_replicator">> -> '_replicator';
@@ -497,7 +504,7 @@ handler_info(_, _, _) ->
 
 get_copy_destination(Req) ->
     try
-        {DocIdStr, _} = couch_httpd_db:parse_copy_destination_header(Req),
+        {DocIdStr, _} = chttpd_util:parse_copy_destination_header(Req),
         list_to_binary(mochiweb_util:unquote(DocIdStr))
     catch _:_ ->
         unknown
@@ -509,10 +516,18 @@ not_supported(#httpd{} = Req, Db, _DDoc) ->
 
 
 not_supported(#httpd{} = Req, _Db) ->
+    not_supported(Req).
+
+
+not_supported(#httpd{} = Req) ->
     Msg = <<"resource is not supported in CouchDB >= 4.x">>,
     chttpd:send_error(Req, 410, gone, Msg).
 
 
 not_implemented(#httpd{} = Req, _Db) ->
+    not_implemented(Req).
+
+
+not_implemented(#httpd{} = Req) ->
     Msg = <<"resource is not implemented">>,
     chttpd:send_error(Req, 501, not_implemented, Msg).
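
The two fallbacks map to different client-facing statuses: not_supported/*
answers 410 Gone for endpoints removed for good in >= 4.x, while
not_implemented/* answers 501 for endpoints expected to return later. A
hedged shell check against a dev node (the URL, port, and admin-party setup
are assumptions, not part of the patch):

    %% _reshard is gone for good; _db_updates is pending a new implementation.
    {ok, "410", _, _} =
        ibrowse:send_req("http://127.0.0.1:15984/_reshard", [], get),
    {ok, "501", _, _} =
        ibrowse:send_req("http://127.0.0.1:15984/_db_updates", [], get).
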
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 5cfd0f7..5d9706a 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -33,7 +33,7 @@
 -include_lib("couch_mrview/include/couch_mrview.hrl").
 
 -import(chttpd,
-    [send_json/2,send_json/3,send_method_not_allowed/2,
+    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
     send_chunk/2,start_chunked_response/3]).
 
 -define(MAX_DB_NUM_FOR_DBS_INFO, 100).
@@ -61,12 +61,7 @@ handle_welcome_req(Req, _) ->
     send_method_not_allowed(Req, "GET,HEAD").
 
 get_features() ->
-    case clouseau_rpc:connected() of
-        true ->
-            [search | config:features()];
-        false ->
-            config:features()
-    end.
+    config:features().
 
 handle_favicon_req(Req) ->
     handle_favicon_req(Req, get_docroot()).
@@ -334,9 +329,33 @@ handle_reload_query_servers_req(#httpd{method='POST'}=Req) ->
 handle_reload_query_servers_req(Req) ->
     send_method_not_allowed(Req, "POST").
 
+handle_uuids_req(#httpd{method='GET'}=Req) ->
+    Max = list_to_integer(config:get("uuids","max_count","1000")),
+    Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
+        N when N > Max ->
+            throw({bad_request, <<"count parameter too large">>});
+        N when N < 0 ->
+            throw({bad_request, <<"count must be a positive integer">>});
+        N -> N
+    catch
+        error:badarg ->
+            throw({bad_request, <<"count must be a positive integer">>})
+    end,
+    UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
+    Etag = couch_httpd:make_etag(UUIDs),
+    couch_httpd:etag_respond(Req, Etag, fun() ->
+        CacheBustingHeaders = [
+            {"Date", couch_util:rfc1123_date()},
+            {"Cache-Control", "no-cache"},
+            % Past date, ON PURPOSE!
+            {"Expires", "Mon, 01 Jan 1990 00:00:00 GMT"},
+            {"Pragma", "no-cache"},
+            {"ETag", Etag}
+        ],
+        send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
+    end);
 handle_uuids_req(Req) ->
-    couch_httpd_misc_handlers:handle_uuids_req(Req).
-
+    send_method_not_allowed(Req, "GET").
 
 handle_up_req(#httpd{method='GET'} = Req) ->
     case config:get("couchdb", "maintenance_mode") of
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index b6c4fac..e36380a 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -138,54 +138,14 @@ handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req)
     send_json(Req, 200, {[{ok, true}]});
 handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
     send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{path_parts=[_, Node | PathParts],
-                       mochi_req=MochiReq0}) ->
-    % strip /_node/{node} from Req0 before descending further
-    RawUri = MochiReq0:get(raw_path),
-    {_, Query, Fragment} = mochiweb_util:urlsplit_path(RawUri),
-    NewPath0 = "/" ++ lists:join("/", [couch_util:url_encode(P) || P <- PathParts]),
-    NewRawPath = mochiweb_util:urlunsplit_path({NewPath0, Query, Fragment}),
-    MaxSize =  config:get_integer("httpd", "max_http_request_size", 4294967296),
-    NewOpts = [{body, MochiReq0:recv_body(MaxSize)} | MochiReq0:get(opts)],
-    Ref = erlang:make_ref(),
-    MochiReq = mochiweb_request:new({remote, self(), Ref},
-                               NewOpts,
-                               MochiReq0:get(method),
-                               NewRawPath,
-                               MochiReq0:get(version),
-                               MochiReq0:get(headers)),
-    call_node(Node, couch_httpd, handle_request, [MochiReq]),
-    recv_loop(Ref, MochiReq0);
+handle_node_req(#httpd{path_parts=[_, _Node | _PathParts]}=Req) ->
+    % Local (backend) dbs are not supported any more
+    chttpd_httpd_handlers:not_supported(Req);
 handle_node_req(#httpd{path_parts=[_]}=Req) ->
     chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
 handle_node_req(Req) ->
     chttpd:send_error(Req, not_found).
 
-recv_loop(Ref, ReqResp) ->
-    receive
-        {Ref, Code, Headers, _Args, start_response} ->
-            recv_loop(Ref, ReqResp:start({Code, Headers}));
-        {Ref, Code, Headers, Len, start_response_length} ->
-            recv_loop(Ref, ReqResp:start_response_length({Code, Headers, Len}));
-        {Ref, Code, Headers, chunked, respond} ->
-            Resp = ReqResp:respond({Code, Headers, chunked}),
-            recv_loop(Ref, Resp);
-        {Ref, Code, Headers, Args, respond} ->
-            Resp = ReqResp:respond({Code, Headers, Args}),
-            {ok, Resp};
-        {Ref, send, Data} ->
-            ReqResp:send(Data),
-            {ok, ReqResp};
-        {Ref, chunk, <<>>} ->
-            ReqResp:write_chunk(<<>>),
-            {ok, ReqResp};
-        {Ref, chunk, Data} ->
-            ReqResp:write_chunk(Data),
-            recv_loop(Ref, ReqResp);
-        _Else ->
-            recv_loop(Ref, ReqResp)
-    end.
-
 call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
     Node1 = try
                 list_to_existing_atom(?b2l(Node0))
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index 8a15bdc..295d753 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -12,15 +12,11 @@
 
 -module(chttpd_show).
 
--export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3]).
+-export([handle_doc_update_req/3]).
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch_mrview/include/couch_mrview.hrl").
 
-% /db/_design/foo/_show/bar/docid
-% show converts a json doc to a response of any content-type.
-% it looks up the doc an then passes it to the query server.
-% then it sends the response from the query server to the http client.
 
 maybe_open_doc(Db, DocId, Options) ->
     case fabric:open_doc(Db, DocId, Options) of
@@ -31,70 +27,6 @@ maybe_open_doc(Db, DocId, Options) ->
         nil
     end.
 
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId]
-    }=Req, Db, DDoc) ->
-
-    % open the doc
-    Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
-    Doc = maybe_open_doc(Db, DocId, Options),
-
-    % we don't handle revs here b/c they are an internal api
-    % returns 404 if there is no doc with DocId
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId|Rest]
-    }=Req, Db, DDoc) ->
-
-    DocParts = [DocId|Rest],
-    DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
-
-    % open the doc
-    Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
-    Doc = maybe_open_doc(Db, DocId1, Options),
-
-    % we don't handle revs here b/c they are an internal api
-    % pass 404 docs to the show function
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName]
-    }=Req, Db, DDoc) ->
-    % with no docid the doc is nil
-    handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
-handle_doc_show_req(Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
-    %% Will throw an exception if the _show handler is missing
-    couch_util:get_nested_json_value(DDoc#doc.body, [<<"shows">>, ShowName]),
-    % get responder for ddoc/showname
-    CurrentEtag = show_etag(Req, Doc, DDoc, []),
-    chttpd:etag_respond(Req, CurrentEtag, fun() ->
-        JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
-        JsonDoc = couch_query_servers:json_doc(Doc),
-        [<<"resp">>, ExternalResp] =
-            couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
-                [JsonDoc, JsonReq]),
-        JsonResp = apply_etag(ExternalResp, CurrentEtag),
-        chttpd_external:send_external_response(Req, JsonResp)
-    end).
-
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
-    Accept = chttpd:header_value(Req, "Accept"),
-    DocPart = case Doc of
-        nil -> nil;
-        Doc -> chttpd:doc_etag(Doc)
-    end,
-    couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
-        UserCtx#user_ctx.roles, More}).
-
 % /db/_design/foo/update/bar/docid
 % updates a doc based on a request
 % handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
@@ -154,86 +86,6 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
     % todo set location field
     chttpd_external:send_external_response(Req, JsonResp).
 
-
-% view-list request with view and list from same design doc.
-handle_view_list_req(#httpd{method=Method,
-        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc)
-        when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
-    Keys = chttpd:qs_json_value(Req, "keys", undefined),
-    handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-% view-list request with view and list from different design docs.
-handle_view_list_req(#httpd{method=Method,
-        path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc)
-        when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
-    Keys = chttpd:qs_json_value(Req, "keys", undefined),
-    handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method=Method}=Req, _Db, _DDoc)
-        when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
-    chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(#httpd{method='POST',
-        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
-    chttpd:validate_ctype(Req, "application/json"),
-    ReqBody = chttpd:body(Req),
-    {Props2} = ?JSON_DECODE(ReqBody),
-    Keys = proplists:get_value(<<"keys">>, Props2, undefined),
-    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
-        {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST',
-        path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
-    chttpd:validate_ctype(Req, "application/json"),
-    ReqBody = chttpd:body(Req),
-    {Props2} = ?JSON_DECODE(ReqBody),
-    Keys = proplists:get_value(<<"keys">>, Props2, undefined),
-    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
-        {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(Req, _Db, _DDoc) ->
-    chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
-    %% Will throw an exception if the _list handler is missing
-    couch_util:get_nested_json_value(DDoc#doc.body, [<<"lists">>, LName]),
-    DbName = couch_db:name(Db),
-    {ok, VDoc} = ddoc_cache:open(DbName, <<"_design/", ViewDesignName/binary>>),
-    CB = fun list_cb/2,
-    QueryArgs = couch_mrview_http:parse_body_and_query(Req, Keys),
-    Options = [{user_ctx, Req#httpd.user_ctx}],
-    couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
-        Acc = #lacc{
-            lname = LName,
-            req = Req,
-            qserver = QServer,
-            db = Db
-        },
-        case ViewName of
-            <<"_all_docs">> ->
-                fabric:all_docs(Db, Options, CB, Acc, QueryArgs);
-            _ ->
-                fabric:query_view(Db, Options, VDoc, ViewName,
-                    CB, Acc, QueryArgs)
-        end
-    end).
-
-
-list_cb({row, Row} = Msg, Acc) ->
-    case lists:keymember(doc, 1, Row) of
-        true -> chttpd_stats:incr_reads();
-        false -> ok
-    end,
-    chttpd_stats:incr_rows(),
-    couch_mrview_show:list_cb(Msg, Acc);
-
-list_cb(Msg, Acc) ->
-    couch_mrview_show:list_cb(Msg, Acc).
-
-
 % Maybe this is in the proplists API
 % todo move to couch_util
 json_apply_field(H, {L}) ->
diff --git a/src/chttpd/src/chttpd_util.erl b/src/chttpd/src/chttpd_util.erl
new file mode 100644
index 0000000..fcaa09d
--- /dev/null
+++ b/src/chttpd/src/chttpd_util.erl
@@ -0,0 +1,41 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_util).
+
+
+-export([
+    parse_copy_destination_header/1
+]).
+
+
+parse_copy_destination_header(Req) ->
+    case couch_httpd:header_value(Req, "Destination") of
+    undefined ->
+        throw({bad_request, "Destination header is mandatory for COPY."});
+    Destination ->
+        case re:run(Destination, "^https?://", [{capture, none}]) of
+        match ->
+            throw({bad_request, "Destination URL must be relative."});
+        nomatch ->
+            % see if ?rev=revid got appended to the Destination header
+            case re:run(Destination, "\\?", [{capture, none}]) of
+            nomatch ->
+                {list_to_binary(Destination), {0, []}};
+            match ->
+                [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+                [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+                {Pos, RevId} = couch_doc:parse_rev(Rev),
+                {list_to_binary(DocId), {Pos, [RevId]}}
+            end
+        end
+    end.
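
The helper keeps the semantics of the couch_httpd_db version it replaces: the
Destination header is mandatory, absolute URLs are rejected, and an optional
?rev=... suffix is split off. Below is a standalone, simplified
re-implementation for illustration only; it inlines a naive stand-in for
couch_doc:parse_rev/1 (which actually decodes the hex digest) and skips the
#httpd{} plumbing:

    -module(copy_dest_sketch).
    -export([parse/1]).

    %% parse("doc")           -> {<<"doc">>, {0, []}}
    %% parse("doc?rev=1-abc") -> {<<"doc">>, {1, [<<"abc">>]}}
    parse(Destination) ->
        case re:run(Destination, "^https?://", [{capture, none}]) of
            match ->
                throw({bad_request, "Destination URL must be relative."});
            nomatch ->
                % See if ?rev=revid got appended to the Destination header.
                case re:split(Destination, "\\?", [{return, list}]) of
                    [DocId] ->
                        {list_to_binary(DocId), {0, []}};
                    [DocId, RevQs] ->
                        [_RevKey, Rev] = re:split(RevQs, "=", [{return, list}]),
                        [Pos, RevId] = string:split(Rev, "-"),
                        {list_to_binary(DocId),
                            {list_to_integer(Pos), [list_to_binary(RevId)]}}
                end
        end.
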
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index e411b5e..af277c1 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -42,39 +42,5 @@
         couch_stats,
         hyper,
         couch_prometheus
-    ]},
-    {env, [
-        { httpd_global_handlers, [
-            {"/", "{couch_httpd_misc_handlers, handle_welcome_req, <<\"Welcome\">>}"},
-            {"favicon.ico", "{couch_httpd_misc_handlers, handle_favicon_req, \"{{prefix}}/share/www\"}"},
-            {"_utils", "{couch_httpd_misc_handlers, handle_utils_dir_req, \"{{prefix}}/share/www\"}"},
-            {"_all_dbs", "{couch_httpd_misc_handlers, handle_all_dbs_req}"},
-            {"_active_tasks", "{couch_httpd_misc_handlers, handle_task_status_req}"},
-            {"_config", "{couch_httpd_misc_handlers, handle_config_req}"},
-            {"_replicate", "{couch_replicator_httpd, handle_req}"},
-            {"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
-            {"_stats", "{couch_stats_httpd, handle_stats_req}"},
-            {"_session", "{couch_httpd_auth, handle_session_req}"},
-            {"_plugins", "{couch_plugins_httpd, handle_req}"}
-        ]},
-          { httpd_db_handlers, [
-            {"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
-            {"_local_docs", "{couch_mrview_http, handle_local_docs_req}"},
-            {"_design_docs", "{couch_mrview_http, handle_design_docs_req}"},
-            {"_changes", "{couch_httpd_db, handle_db_changes_req}"},
-            {"_compact", "{couch_httpd_db, handle_compact_req}"},
-            {"_design", "{couch_httpd_db, handle_design_req}"},
-            {"_temp_view", "{couch_mrview_http, handle_temp_view_req}"},
-            {"_view_cleanup", "{couch_mrview_http, handle_cleanup_req}"}
-        ]},
-        { httpd_design_handlers, [
-            {"_compact", "{couch_mrview_http, handle_compact_req}"},
-            {"_info", "{couch_mrview_http, handle_info_req}"},
-            {"_list", "{couch_mrview_show, handle_view_list_req}"},
-            {"_rewrite", "{couch_httpd_rewrite, handle_rewrite_req}"},
-            {"_show", "{couch_mrview_show, handle_doc_show_req}"},
-            {"_update", "{couch_mrview_show, handle_doc_update_req}"},
-            {"_view", "{couch_mrview_http, handle_view_req}"}
-        ]}
     ]}
 ]}.
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index d89c749..fd83c25 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -16,8 +16,6 @@
 
 -include_lib("couch/include/couch_db.hrl").
 
--export([start_link/0, start_link/1, stop/0, handle_request/5]).
-
 -export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
 -export([path/1,absolute_uri/2,body_length/1]).
 -export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
@@ -32,164 +30,17 @@
 -export([send_response/4,send_response_no_cors/4,send_method_not_allowed/2,
     send_error/2,send_error/4, send_redirect/2,send_chunked_error/2]).
 -export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
--export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
+-export([accepted_encodings/1,validate_referer/1,validate_ctype/2]).
 -export([http_1_0_keep_alive/2]).
 -export([validate_host/1]).
 -export([validate_bind_address/1]).
 -export([check_max_request_length/1]).
--export([handle_request/1]).
--export([set_auth_handlers/0]).
 -export([maybe_decompress/2]).
 
 -define(HANDLER_NAME_IN_MODULE_POS, 6).
 -define(MAX_DRAIN_BYTES, 1048576).
 -define(MAX_DRAIN_TIME_MSEC, 1000).
 
-start_link() ->
-    start_link(http).
-start_link(http) ->
-    Port = config:get("httpd", "port", "5984"),
-    start_link(?MODULE, [{port, Port}]);
-start_link(https) ->
-    Port = config:get("ssl", "port", "6984"),
-    {ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", undefined)),
-    {ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", undefined)),
-    {ok, SecureRenegotiate} = couch_util:parse_term(config:get("ssl", "secure_renegotiate", undefined)),
-    ServerOpts0 =
-        [{cacertfile, config:get("ssl", "cacert_file", undefined)},
-         {keyfile, config:get("ssl", "key_file", undefined)},
-         {certfile, config:get("ssl", "cert_file", undefined)},
-         {password, config:get("ssl", "password", undefined)},
-         {secure_renegotiate, SecureRenegotiate},
-         {versions, Versions},
-         {ciphers, Ciphers}],
-
-    case (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
-        couch_util:get_value(certfile, ServerOpts0) == undefined) of
-        true ->
-            couch_log:error("SSL enabled but PEM certificates are missing", []),
-            throw({error, missing_certs});
-        false ->
-            ok
-    end,
-
-    ServerOpts = [Opt || {_, V}=Opt <- ServerOpts0, V /= undefined],
-
-    ClientOpts = case config:get("ssl", "verify_ssl_certificates", "false") of
-        "false" ->
-            [];
-        "true" ->
-            FailIfNoPeerCert = case config:get("ssl", "fail_if_no_peer_cert", "false") of
-            "false" -> false;
-            "true" -> true
-            end,
-            [{depth, list_to_integer(config:get("ssl",
-                "ssl_certificate_max_depth", "1"))},
-             {fail_if_no_peer_cert, FailIfNoPeerCert},
-             {verify, verify_peer}] ++
-            case config:get("ssl", "verify_fun", undefined) of
-                undefined -> [];
-                SpecStr ->
-                    [{verify_fun, make_arity_3_fun(SpecStr)}]
-            end
-    end,
-    SslOpts = ServerOpts ++ ClientOpts,
-
-    Options =
-        [{port, Port},
-         {ssl, true},
-         {ssl_opts, SslOpts}],
-    start_link(https, Options).
-start_link(Name, Options) ->
-    BindAddress = case config:get("httpd", "bind_address", "any") of
-                      "any" -> any;
-                      Else -> Else
-                  end,
-    ok = validate_bind_address(BindAddress),
-
-    {ok, ServerOptions} = couch_util:parse_term(
-        config:get("httpd", "server_options", "[]")),
-    {ok, SocketOptions} = couch_util:parse_term(
-        config:get("httpd", "socket_options", "[]")),
-
-    set_auth_handlers(),
-    Handlers = get_httpd_handlers(),
-
-    % ensure uuid is set so that concurrent replications
-    % get the same value.
-    couch_server:get_uuid(),
-
-    Loop = fun(Req)->
-        case SocketOptions of
-        [] ->
-            ok;
-        _ ->
-            ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
-        end,
-        apply(?MODULE, handle_request, [Req | Handlers])
-    end,
-
-    % set mochiweb options
-    FinalOptions = lists:append([Options, ServerOptions, [
-            {loop, Loop},
-            {name, Name},
-            {ip, BindAddress}]]),
-
-    % launch mochiweb
-    case mochiweb_http:start(FinalOptions) of
-        {ok, MochiPid} ->
-            {ok, MochiPid};
-        {error, Reason} ->
-            couch_log:error("Failure to start Mochiweb: ~s~n", [Reason]),
-            throw({error, Reason})
-    end.
-
-
-stop() ->
-    mochiweb_http:stop(couch_httpd),
-    catch mochiweb_http:stop(https).
-
-
-set_auth_handlers() ->
-    AuthenticationSrcs = make_fun_spec_strs(
-        config:get("httpd", "authentication_handlers", "")),
-    AuthHandlers = lists:map(
-        fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs),
-    AuthenticationFuns = AuthHandlers ++ [
-        fun couch_httpd_auth:party_mode_handler/1 %% must be last
-    ],
-    ok = application:set_env(couch, auth_handlers, AuthenticationFuns).
-
-auth_handler_name(SpecStr) ->
-    lists:nth(?HANDLER_NAME_IN_MODULE_POS, re:split(SpecStr, "[\\W_]", [])).
-
-get_httpd_handlers() ->
-    {ok, HttpdGlobalHandlers} = application:get_env(couch, httpd_global_handlers),
-
-    UrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
-        end, HttpdGlobalHandlers),
-
-    {ok, HttpdDbHandlers} = application:get_env(couch, httpd_db_handlers),
-
-    DbUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
-        end, HttpdDbHandlers),
-
-    {ok, HttpdDesignHandlers} = application:get_env(couch, httpd_design_handlers),
-
-    DesignUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
-        end, HttpdDesignHandlers),
-
-    UrlHandlers = dict:from_list(UrlHandlersList),
-    DbUrlHandlers = dict:from_list(DbUrlHandlersList),
-    DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
-    DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
-    [DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers].
 
 % SpecStr is a string like "{my_module, my_fun}"
 %  or "{my_module, my_fun, <<"my_arg">>}"
@@ -221,175 +72,6 @@ make_arity_3_fun(SpecStr) ->
 make_fun_spec_strs(SpecStr) ->
     re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
 
-handle_request(MochiReq) ->
-    Body = proplists:get_value(body, MochiReq:get(opts)),
-    erlang:put(mochiweb_request_body, Body),
-    apply(?MODULE, handle_request, [MochiReq | get_httpd_handlers()]).
-
-handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
-    DesignUrlHandlers) ->
-    %% reset rewrite count for new request
-    erlang:put(?REWRITE_COUNT, 0),
-
-    MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
-
-    handle_request_int(MochiReq1, DefaultFun,
-                UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
-
-handle_request_int(MochiReq, DefaultFun,
-            UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
-    Begin = os:timestamp(),
-    % for the path, use the raw path with the query string and fragment
-    % removed, but URL quoting left intact
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
-    % get requested path
-    RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-        undefined ->
-            case MochiReq:get_header_value("x-couchdb-requested-path") of
-                undefined -> RawUri;
-                R -> R
-            end;
-        P -> P
-    end,
-
-    HandlerKey =
-    case mochiweb_util:partition(Path, "/") of
-    {"", "", ""} ->
-        <<"/">>; % Special case the root url handler
-    {FirstPart, _, _} ->
-        list_to_binary(FirstPart)
-    end,
-    couch_log:debug("~p ~s ~p from ~p~nHeaders: ~p", [
-        MochiReq:get(method),
-        RawUri,
-        MochiReq:get(version),
-        peer(MochiReq),
-        mochiweb_headers:to_list(MochiReq:get(headers))
-    ]),
-
-    Method1 =
-    case MochiReq:get(method) of
-        % already an atom
-        Meth when is_atom(Meth) -> Meth;
-
-        % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
-        % possible (if any module references the atom, then it's existing).
-        Meth -> couch_util:to_existing_atom(Meth)
-    end,
-    increment_method_stats(Method1),
-
-    % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
-    MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
-    Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST",
-                                                 "PUT", "DELETE",
-                                                 "TRACE", "CONNECT",
-                                                 "COPY"]) of
-    true ->
-        couch_log:info("MethodOverride: ~s (real method was ~s)",
-                       [MethodOverride, Method1]),
-        case Method1 of
-        'POST' -> couch_util:to_existing_atom(MethodOverride);
-        _ ->
-            % Ignore X-HTTP-Method-Override when the original verb isn't POST.
-            % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
-            % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
-            Method1
-        end;
-    _ -> Method1
-    end,
-
-    % alias HEAD to GET as mochiweb takes care of stripping the body
-    Method = case Method2 of
-        'HEAD' -> 'GET';
-        Other -> Other
-    end,
-
-    HttpReq = #httpd{
-        mochi_req = MochiReq,
-        peer = peer(MochiReq),
-        method = Method,
-        requested_path_parts =
-            [?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
-        path_parts = [?l2b(unquote(Part)) || Part <- string:tokens(Path, "/")],
-        db_url_handlers = DbUrlHandlers,
-        design_url_handlers = DesignUrlHandlers,
-        default_fun = DefaultFun,
-        url_handlers = UrlHandlers,
-        user_ctx = erlang:erase(pre_rewrite_user_ctx),
-        auth = erlang:erase(pre_rewrite_auth)
-    },
-
-    HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
-
-    {ok, Resp} =
-    try
-        validate_host(HttpReq),
-        check_request_uri_length(RawUri),
-        case chttpd_cors:maybe_handle_preflight_request(HttpReq) of
-        not_preflight ->
-            case authenticate_request(HttpReq) of
-            #httpd{} = Req ->
-                HandlerFun(Req);
-            Response ->
-                Response
-            end;
-        Response ->
-            Response
-        end
-    catch
-        throw:{http_head_abort, Resp0} ->
-            {ok, Resp0};
-        throw:{invalid_json, S} ->
-            couch_log:error("attempted upload of invalid JSON"
-                            " (set log_level to debug to log it)", []),
-            couch_log:debug("Invalid JSON: ~p",[S]),
-            send_error(HttpReq, {bad_request, invalid_json});
-        throw:unacceptable_encoding ->
-            couch_log:error("unsupported encoding method for the response", []),
-            send_error(HttpReq, {not_acceptable, "unsupported encoding"});
-        throw:bad_accept_encoding_value ->
-            couch_log:error("received invalid Accept-Encoding header", []),
-            send_error(HttpReq, bad_request);
-        exit:normal ->
-            exit(normal);
-        exit:snappy_nif_not_loaded ->
-            ErrorReason = "To access the database or view index, Apache CouchDB"
-                          " must be built with Erlang OTP R13B04 or higher.",
-            couch_log:error("~s", [ErrorReason]),
-            send_error(HttpReq, {bad_otp_release, ErrorReason});
-        exit:{body_too_large, _} ->
-            send_error(HttpReq, request_entity_too_large);
-        exit:{uri_too_long, _} ->
-            send_error(HttpReq, request_uri_too_long);
-        throw:Error ->
-            Stack = erlang:get_stacktrace(),
-            couch_log:debug("Minor error in HTTP request: ~p",[Error]),
-            couch_log:debug("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error);
-        error:badarg ->
-            Stack = erlang:get_stacktrace(),
-            couch_log:error("Badarg error in HTTP request",[]),
-            couch_log:info("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, badarg);
-        error:function_clause ->
-            Stack = erlang:get_stacktrace(),
-            couch_log:error("function_clause error in HTTP request",[]),
-            couch_log:info("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, function_clause);
-        Tag:Error ->
-            Stack = erlang:get_stacktrace(),
-            couch_log:error("Uncaught error in HTTP request: ~p",
-                            [{Tag, Error}]),
-            couch_log:info("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error)
-    end,
-    RequestTime = round(timer:now_diff(os:timestamp(), Begin)/1000),
-    couch_stats:update_histogram([couchdb, request_time], RequestTime),
-    couch_stats:increment_counter([couchdb, httpd, requests]),
-    {ok, Resp}.
-
 validate_host(#httpd{} = Req) ->
     case config:get_boolean("httpd", "validate_host", false) of
         true ->
@@ -418,26 +100,6 @@ valid_hosts() ->
     List = config:get("httpd", "valid_hosts", ""),
     re:split(List, ",", [{return, list}]).
 
-check_request_uri_length(Uri) ->
-    check_request_uri_length(Uri, config:get("httpd", "max_uri_length")).
-
-check_request_uri_length(_Uri, undefined) ->
-    ok;
-check_request_uri_length(Uri, MaxUriLen) when is_list(MaxUriLen) ->
-    case length(Uri) > list_to_integer(MaxUriLen) of
-        true ->
-            throw(request_uri_too_long);
-        false ->
-            ok
-    end.
-
-authenticate_request(Req) ->
-    {ok, AuthenticationFuns} = application:get_env(couch, auth_handlers),
-    chttpd:authenticate_request(Req, couch_auth_cache, AuthenticationFuns).
-
-increment_method_stats(Method) ->
-    couch_stats:increment_counter([couchdb, httpd_request_methods, Method]).
-
 validate_referer(Req) ->
     Host = host_for_request(Req),
     Referer = header_value(Req, "Referer", fail),
@@ -1225,13 +887,6 @@ http_respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
 http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
     MochiReq:Type({Code, Headers, Args}).
 
-peer(MochiReq) ->
-    case MochiReq:get(socket) of
-        {remote, Pid, _} ->
-            node(Pid);
-        _ ->
-            MochiReq:get(peer)
-    end.
 
 %%%%%%%% module tests below %%%%%%%%
 
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index bb78215..4ccd0c9 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -33,11 +33,6 @@ init([]) ->
         {uuids, {couch_uuids, start, []}}
     ],
 
-    MaybeHttp = case http_enabled() of
-        true -> [{httpd, {couch_httpd, start_link, []}}];
-        false -> couch_httpd:set_auth_handlers(), []
-    end,
-
     MaybeHttps = case https_enabled() of
         true -> [{httpsd, {chttpd, start_link, [https]}}];
         false -> []
@@ -55,13 +50,10 @@ init([]) ->
                 [Module]}
         end
         || {Name, Spec}
-        <- Daemons ++ MaybeHttp ++ MaybeHttps, Spec /= ""],
+        <- Daemons ++ MaybeHttps, Spec /= ""],
     {ok, {{one_for_one, 50, 3600},
         couch_epi:register_service(couch_db_epi, Children)}}.
 
-http_enabled() ->
-    config:get_boolean("httpd", "enable", false).
-
 https_enabled() ->
     % 1. [ssl] enable = true | false
     % 2. if [daemons] httpsd == {chttpd, start_link, [https]} -> pretend true as well
diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
index 3c8586a..f164ae6 100644
--- a/src/couch/test/eunit/chttpd_endpoints_tests.erl
+++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl
@@ -47,10 +47,10 @@ url_handlers() ->
         {<<"_replicate">>, chttpd_misc, handle_replicate_req},
         {<<"_uuids">>, chttpd_misc, handle_uuids_req},
         {<<"_session">>, chttpd_auth, handle_session_req},
-        {<<"_up">>, chttpd_misc, handle_up_req},
-        {<<"_membership">>, mem3_httpd, handle_membership_req},
-        {<<"_db_updates">>, global_changes_httpd, handle_global_changes_req},
-        {<<"_cluster_setup">>, setup_httpd, handle_setup_req}
+        {<<"_membership">>, chttpd_httpd_handlers, not_supported},
+        {<<"_db_updates">>, chttpd_httpd_handlers, not_implemented},
+        {<<"_cluster_setup">>, chttpd_httpd_handlers, not_implemented},
+        {<<"_up">>, chttpd_misc, handle_up_req}
     ],
 
     lists:foreach(fun({Path, Mod, Fun}) ->
@@ -67,9 +67,9 @@ db_handlers() ->
         {<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req},
         {<<"_compact">>, chttpd_db, handle_compact_req},
         {<<"_design">>, chttpd_db, handle_design_req},
-        {<<"_temp_view">>, chttpd_view, handle_temp_view_req},
+        {<<"_temp_view">>, chttpd_httpd_handlers, not_supported},
         {<<"_changes">>, chttpd_db, handle_changes_req},
-        {<<"_shards">>, mem3_httpd, handle_shards_req},
+        {<<"_shards">>, chttpd_httpd_handlers, not_supported},
         {<<"_index">>, mango_httpd, handle_req},
         {<<"_explain">>, mango_httpd, handle_req},
         {<<"_find">>, mango_httpd, handle_req}
@@ -87,11 +87,11 @@ db_handlers() ->
 design_handlers() ->
     Handlers = [
         {<<"_view">>, chttpd_view, handle_view_req},
-        {<<"_show">>, chttpd_show, handle_doc_show_req},
-        {<<"_list">>, chttpd_show, handle_view_list_req},
+        {<<"_show">>, chttpd_httpd_handlers, not_supported},
+        {<<"_list">>, chttpd_httpd_handlers, not_supported},
         {<<"_update">>, chttpd_show, handle_doc_update_req},
         {<<"_info">>, chttpd_db, handle_design_info_req},
-        {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req}
+        {<<"_rewrite">>, chttpd_httpd_handlers, not_supported}
     ],
 
     lists:foreach(fun({Path, Mod, Fun}) ->
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index d1cbe57..44b9e47 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -58,7 +58,7 @@ base_id(#{?SOURCE := Src0, ?TARGET := Tgt0} = Rep, 3) ->
 
 base_id(#{?SOURCE := Src0, ?TARGET := Tgt0} = Rep, 2) ->
     {ok, HostName} = inet:gethostname(),
-    Port = case (catch mochiweb_socket_server:get(couch_httpd, port)) of
+    Port = case (catch mochiweb_socket_server:get(chttpd, port)) of
     P when is_number(P) ->
         P;
     _ ->

[couchdb] 06/24: Update couch_secondary_sup to not start index_server

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 622285d2eda24edab270c6f66042f6f7dbc499e2
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 00:19:08 2021 -0400

    Update couch_secondary_sup to not start index_server
    
    Index server is gone and replaced by couch_views + couch_jobs applications.
---
 src/couch/src/couch_secondary_sup.erl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index 4ccd0c9..293e1b5 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -27,7 +27,6 @@ init([]) ->
             dynamic}
     ],
     Daemons = [
-        {index_server, {couch_index_server, start_link, []}},
         {query_servers, {couch_proc_manager, start_link, []}},
         {vhosts, {couch_httpd_vhost, start_link, []}},
         {uuids, {couch_uuids, start, []}}

[couchdb] 19/24: Clean up database name validation in fabric2_db

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 45de516f4571e4bdc9e0f7007f31e37d6857cc67
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 02:44:35 2021 -0400

    Clean up database name validation in fabric2_db
    
    `normalize_dbname/1` is not needed as database names do not have the `.couch`
    suffix, and we don't have shard paths any more. For validation, send the
    `DbName` to the `fabric2_db_plugin` as both the real DbName and the
    "normalized" one. This is mostly to avoid changing the plugin interface for now
    and should eventually be updated (in a separate PR).
---
 src/fabric/src/fabric2_db.erl | 24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 4e0a9fd..aab80a8 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -128,7 +128,6 @@
     %% wait_for_compaction/2,
 
     dbname_suffix/1,
-    normalize_dbname/1,
     validate_dbname/1,
 
     %% make_doc/5,
@@ -1126,27 +1125,19 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
 
 
 dbname_suffix(DbName) ->
-    filename:basename(normalize_dbname(DbName)).
-
-
-normalize_dbname(DbName) ->
-    % Remove in the final cleanup. We don't need to handle shards prefix or
-    % remove .couch suffixes anymore. Keep it for now to pass all the existing
-    % tests.
-    couch_db:normalize_dbname(DbName).
+    filename:basename(DbName).
 
 
 validate_dbname(DbName) when is_list(DbName) ->
     validate_dbname(?l2b(DbName));
 
 validate_dbname(DbName) when is_binary(DbName) ->
-    Normalized = normalize_dbname(DbName),
     fabric2_db_plugin:validate_dbname(
-        DbName, Normalized, fun validate_dbname_int/2).
+        DbName, DbName, fun validate_dbname_int/2).
 
-validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
+validate_dbname_int(DbName, DbName) when is_binary(DbName) ->
     case validate_dbname_length(DbName) of
-        ok -> validate_dbname_pat(DbName, Normalized);
+        ok -> validate_dbname_pat(DbName);
         {error, _} = Error -> Error
     end.
 
@@ -1160,13 +1151,12 @@ validate_dbname_length(DbName) ->
     end.
 
 
-validate_dbname_pat(DbName, Normalized) ->
-    DbNoExt = couch_util:drop_dot_couch_ext(DbName),
-    case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
+validate_dbname_pat(DbName) ->
+    case re:run(DbName, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
         match ->
             ok;
         nomatch ->
-            case is_system_db_name(Normalized) of
+            case is_system_db_name(DbName) of
                 true -> ok;
                 false -> {error, {illegal_database_name, DbName}}
             end
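
After this change a name is valid when it matches ?DBNAME_REGEX or is a known
system database. An illustrative predicate; the regex literal below is an
assumption mirroring the ?DBNAME_REGEX macro in couch_db.hrl:

    %% valid(<<"mydb">>) -> true, valid(<<"MyDB">>) -> false. <<"_users">>
    %% fails the regex but is allowed separately via is_system_db_name/1.
    valid(DbName) ->
        Regex = "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$",
        match =:= re:run(DbName, Regex, [{capture, none}, dollar_endonly]).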

[couchdb] 22/24: Clean up tests after removing 3.x applications and couch_views updates

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 870ba4cc6d02e628e70e753638de7b3adf88635f
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 02:59:53 2021 -0400

    Clean up tests after removing 3.x applications and couch_views updates
    
     * Update the couch_views include paths
    
     * Exclude non-existent applications from setup logic
    
     * Do not run tests against the backdoor port
    
     * Do not run tests checking for non-existent system dbs
    
     * Since "new" couch_att attachment format has changed a bit and encoding is
       not `identity` and `md5` is not `<<>>` any longer, some tests had to be
       updated to set those explicitly.
---
 src/couch/src/test_util.erl                        | 42 +----------
 src/couch/test/eunit/couch_db_mpr_tests.erl        | 12 ++--
 src/couch/test/eunit/couch_doc_json_tests.erl      | 82 +++++++---------------
 src/couch/test/eunit/couch_doc_tests.erl           | 45 +-----------
 src/couch/test/eunit/couch_query_servers_tests.erl |  2 +-
 src/couch/test/eunit/couchdb_auth_tests.erl        | 11 +--
 src/couch/test/eunit/couchdb_cors_tests.erl        |  9 ++-
 src/couch/test/eunit/couchdb_mrview_cors_tests.erl | 18 +----
 src/couch_views/test/couch_views_batch_test.erl    |  2 +-
 src/couch_views/test/couch_views_cleanup_test.erl  |  1 -
 .../test/couch_views_custom_red_test.erl           |  1 -
 src/couch_views/test/couch_views_indexer_test.erl  |  1 -
 src/couch_views/test/couch_views_info_test.erl     |  2 +-
 src/couch_views/test/couch_views_map_test.erl      | 22 ------
 src/couch_views/test/couch_views_size_test.erl     |  3 +-
 .../test/couch_views_trace_index_test.erl          |  2 +-
 src/couch_views/test/couch_views_updater_test.erl  |  3 +-
 src/couch_views/test/couch_views_upgrade_test.erl  |  3 +-
 src/fabric/test/fabric2_dir_prefix_tests.erl       |  4 +-
 src/fabric/test/fabric2_node_types_tests.erl       |  4 +-
 src/fabric/test/fabric2_tx_options_tests.erl       |  4 +-
 test/elixir/lib/step/create_db.ex                  |  2 +-
 22 files changed, 59 insertions(+), 216 deletions(-)

diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index 125e764..c95c444 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -14,8 +14,6 @@
 
 -include_lib("couch/include/couch_eunit.hrl").
 -include("couch_db.hrl").
--include("couch_db_int.hrl").
--include("couch_bt_engine.hrl").
 
 -export([init_code_path/0]).
 -export([source_file/1, build_file/1]).
@@ -36,12 +34,10 @@
 
 -export([start/1, start/2, start/3, stop/1]).
 
--export([fake_db/1]).
-
 -record(test_context, {mocked = [], started = [], module}).
 
 -define(DEFAULT_APPS,
-        [inets, ibrowse, ssl, config, couch_epi, couch_event, couch]).
+        [inets, ibrowse, ssl, config, couch_epi, couch]).
 
 srcdir() ->
     code:priv_dir(couch) ++ "/../../".
@@ -54,8 +50,7 @@ init_code_path() ->
         "couchdb",
         "jiffy",
         "ibrowse",
-        "mochiweb",
-        "snappy"
+        "mochiweb"
     ],
     lists:foreach(fun(Name) ->
         code:add_patha(filename:join([builddir(), "src", Name]))
@@ -248,7 +243,7 @@ start(Module, ExtraApps) ->
     start(Module, ExtraApps, []).
 
 start(Module, ExtraApps, Options) ->
-    Apps = start_applications([config, couch_log, ioq, couch_epi | ExtraApps]),
+    Apps = start_applications([config, couch_log, couch_epi | ExtraApps]),
     ToMock = [config, couch_stats] -- proplists:get_value(dont_mock, Options, []),
     mock(ToMock),
     #test_context{module = Module, mocked = ToMock, started = Apps}.
@@ -257,37 +252,6 @@ stop(#test_context{mocked = Mocked, started = Apps}) ->
     meck:unload(Mocked),
     stop_applications(Apps).
 
-fake_db(Fields0) ->
-    {ok, Db, Fields} = maybe_set_engine(Fields0),
-    Indexes = lists:zip(
-            record_info(fields, db),
-            lists:seq(2, record_info(size, db))
-        ),
-    lists:foldl(fun({FieldName, Value}, Acc) ->
-        Idx = couch_util:get_value(FieldName, Indexes),
-        setelement(Idx, Acc, Value)
-    end, Db, Fields).
-
-maybe_set_engine(Fields0) ->
-    case lists:member(engine, Fields0) of
-        true ->
-            {ok, #db{}, Fields0};
-        false ->
-            {ok, Header, Fields} = get_engine_header(Fields0),
-            Db = #db{engine = {couch_bt_engine, #st{header = Header}}},
-            {ok, Db, Fields}
-    end.
-
-get_engine_header(Fields) ->
-    Keys = [disk_version, update_seq, unused, id_tree_state,
-        seq_tree_state, local_tree_state, purge_seq, purged_docs,
-        security_ptr, revs_limit, uuid, epochs, compacted_seq],
-    {HeadFields, RestFields} = lists:partition(
-        fun({K, _}) -> lists:member(K, Keys) end, Fields),
-    Header0 = couch_bt_engine_header:new(),
-    Header = couch_bt_engine_header:set(Header0, HeadFields),
-    {ok, Header, RestFields}.
-
 now_us() ->
     {MegaSecs, Secs, MicroSecs} = os:timestamp(),
     (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.
diff --git a/src/couch/test/eunit/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl
index bb97c66..3713797 100644
--- a/src/couch/test/eunit/couch_db_mpr_tests.erl
+++ b/src/couch/test/eunit/couch_db_mpr_tests.erl
@@ -31,8 +31,8 @@ setup() ->
     Hashed = couch_passwords:hash_admin_password(?PASS),
     ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
     TmpDb = ?tempdb(),
-    Addr = config:get("httpd", "bind_address", "127.0.0.1"),
-    Port = mochiweb_socket_server:get(couch_httpd, port),
+    Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+    Port = mochiweb_socket_server:get(chttpd, port),
     Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
     Url.
 
@@ -64,8 +64,12 @@ couch_db_mpr_test_() ->
         "multi-part attachment tests",
         {
             setup,
-            fun test_util:start_couch/0,
-            fun test_util:stop_couch/1,
+            fun() ->
+                test_util:start_couch([chttpd])
+            end,
+            fun(Ctx) ->
+                test_util:stop_couch(Ctx)
+            end,
             {
                 foreach,
                 fun setup/0,
diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
index 51f2289..3a07642 100644
--- a/src/couch/test/eunit/couch_doc_json_tests.erl
+++ b/src/couch/test/eunit/couch_doc_json_tests.erl
@@ -19,19 +19,19 @@
 setup() ->
     mock(couch_log),
     mock(config),
-    mock(couch_db_plugin),
+    mock(fabric2_db_plugin),
     ok.
 
 teardown(_) ->
     meck:unload(couch_log),
     meck:unload(config),
-    meck:unload(couch_db_plugin),
+    meck:unload(fabric2_db_plugin),
     ok.
 
-mock(couch_db_plugin) ->
-    ok = meck:new(couch_db_plugin, [passthrough]),
-    ok = meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end),
-    ok;
+mock(fabric2_db_plugin) ->
+    ok = meck:new(fabric2_db_plugin, [passthrough]),
+    ok = meck:expect(fabric2_db_plugin, validate_docid, fun(_) -> false end),
+    ok;
 mock(couch_log) ->
     ok = meck:new(couch_log, [passthrough]),
     ok = meck:expect(couch_log, debug, fun(_, _) -> ok end),
@@ -52,7 +52,6 @@ json_doc_test_() ->
         fun(_) ->
             [{"Document from JSON", [
                 from_json_with_dbname_error_cases(),
-                from_json_with_db_name_success_cases(),
                 from_json_success_cases(),
                 from_json_error_cases()
              ]},
@@ -113,7 +112,9 @@ from_json_success_cases() ->
                     {type, <<"application/awesome">>},
                     {att_len, 45},
                     {disk_len, 45},
-                    {revpos, undefined}
+                    {revpos, undefined},
+                    {encoding, identity},
+                    {md5, <<>>}
                 ]),
                 couch_att:new([
                     {name, <<"noahs_private_key.gpg">>},
@@ -121,7 +122,9 @@ from_json_success_cases() ->
                     {type, <<"application/pgp-signature">>},
                     {att_len, 18},
                     {disk_len, 18},
-                    {revpos, 0}
+                    {revpos, 0},
+                    {encoding, undefined},
+                    {md5, undefined}
                 ])
             ]},
             "Attachments are parsed correctly."
@@ -173,44 +176,6 @@ from_json_success_cases() ->
         end,
         Cases).
 
-from_json_with_db_name_success_cases() ->
-    Cases = [
-        {
-            {[]},
-            <<"_dbs">>,
-            #doc{},
-            "DbName _dbs is acceptable with no docid"
-        },
-        {
-            {[{<<"_id">>, <<"zing!">>}]},
-            <<"_dbs">>,
-            #doc{id = <<"zing!">>},
-            "DbName _dbs is acceptable with a normal docid"
-        },
-        {
-            {[{<<"_id">>, <<"_users">>}]},
-            <<"_dbs">>,
-            #doc{id = <<"_users">>},
-            "_dbs/_users is acceptable"
-        },
-        {
-            {[{<<"_id">>, <<"_replicator">>}]},
-            <<"_dbs">>,
-            #doc{id = <<"_replicator">>},
-            "_dbs/_replicator is acceptable"
-        },
-        {
-            {[{<<"_id">>, <<"_global_changes">>}]},
-            <<"_dbs">>,
-            #doc{id = <<"_global_changes">>},
-            "_dbs/_global_changes is acceptable"
-        }
-    ],
-    lists:map(
-        fun({EJson, DbName, Expect, Msg}) ->
-            {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
-        end,
-        Cases).
 
 from_json_error_cases() ->
     Cases = [
@@ -308,13 +273,6 @@ from_json_with_dbname_error_cases() ->
     Cases = [
         {
             {[{<<"_id">>, <<"_random">>}]},
-            <<"_dbs">>,
-            {illegal_docid,
-             <<"Only reserved document ids may start with underscore.">>},
-            "Disallow non-system-DB underscore prefixed docids in _dbs database."
-        },
-        {
-            {[{<<"_id">>, <<"_random">>}]},
             <<"foobar">>,
             {illegal_docid,
              <<"Only reserved document ids may start with underscore.">>},
@@ -418,7 +376,9 @@ to_json_success_cases() ->
                     {data, fun() -> ok end},
                     {revpos, 1},
                     {att_len, 400},
-                    {disk_len, 400}
+                    {disk_len, 400},
+                    {md5, <<>>},
+                    {encoding, identity}
                 ]),
                 couch_att:new([
                     {name, <<"fast.json">>},
@@ -426,7 +386,9 @@ to_json_success_cases() ->
                     {data, <<"{\"so\": \"there!\"}">>},
                     {revpos, 1},
                     {att_len, 16},
-                    {disk_len, 16}
+                    {disk_len, 16},
+                    {md5, <<>>},
+                    {encoding, identity}
                 ])
             ]},
             {[
@@ -457,13 +419,17 @@ to_json_success_cases() ->
                     {data, fun() -> <<"diet pepsi">> end},
                     {revpos, 1},
                     {att_len, 10},
-                    {disk_len, 10}
+                    {disk_len, 10},
+                    {md5, <<>>},
+                    {encoding, identity}
                 ]),
                 couch_att:new([
                     {name, <<"food.now">>},
                     {type, <<"application/food">>},
                     {revpos, 1},
-                    {data, <<"sammich">>}
+                    {data, <<"sammich">>},
+                    {md5, <<>>},
+                    {encoding, identity}
                 ])
             ]},
             {[
diff --git a/src/couch/test/eunit/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl
index cf41df6..079b13f 100644
--- a/src/couch/test/eunit/couch_doc_tests.erl
+++ b/src/couch/test/eunit/couch_doc_tests.erl
@@ -42,7 +42,7 @@ doc_to_multi_part_stream_test() ->
     AttLength = size(AttData),
     Atts = [couch_att:new([
        {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
-       {att_len, AttLength}, {disk_len, AttLength}])],
+       {att_len, AttLength}, {disk_len, AttLength}, {encoding, identity}])],
     couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true),
     AttLengthStr = integer_to_binary(AttLength),
     BoundaryLen = size(Boundary),
@@ -69,51 +69,11 @@ len_doc_to_multi_part_stream_test() ->
     AttLength = size(AttData),
     Atts = [couch_att:new([
        {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
-       {att_len, AttLength}, {disk_len, AttLength}])],
+       {att_len, AttLength}, {disk_len, AttLength}, {encoding, identity}])],
     {ContentType, 258} = %% 258 is the expected size of the document
         couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true),
     ok.
 
-validate_docid_test_() ->
-    {setup,
-        fun() ->
-            mock_config(),
-            ok = meck:new(couch_db_plugin, [passthrough]),
-            meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end)
-        end,
-        fun(_) ->
-            meck:unload(config),
-            meck:unload(couch_db_plugin)
-        end,
-        [
-            ?_assertEqual(ok, couch_doc:validate_docid(<<"idx">>)),
-            ?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)),
-            ?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)),
-            ?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))),
-            ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)),
-            ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)),
-            ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<>>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<16#80>>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<"_idx">>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<"_">>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<"_design/">>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<"_local/">>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(large_id(1025))),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<"_users">>, <<"foo">>)),
-            ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>))
-        ]
-    }.
-
 large_id(N) ->
     << <<"x">> || _ <- lists:seq(1, N) >>.
 
@@ -139,7 +99,6 @@ mock_config() ->
     meck:expect(config, get,
         fun("couchdb", "max_document_id_length", "infinity") -> "1024";
            ("couchdb", "max_attachment_size", "infinity") -> "infinity";
-           ("mem3", "shards_db", "_dbs") -> "_dbs";
             (Key, Val, Default) -> meck:passthrough([Key, Val, Default])
         end
     ).
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
index 440fc8e..46a8474 100644
--- a/src/couch/test/eunit/couch_query_servers_tests.erl
+++ b/src/couch/test/eunit/couch_query_servers_tests.erl
@@ -25,7 +25,7 @@ teardown(_) ->
 
 
 setup_oom() ->
-    test_util:start_couch([ioq]).
+    test_util:start_couch().
 
 
 teardown_oom(Ctx) ->
diff --git a/src/couch/test/eunit/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl
index 19d32d0..92f8a0a 100644
--- a/src/couch/test/eunit/couchdb_auth_tests.erl
+++ b/src/couch/test/eunit/couchdb_auth_tests.erl
@@ -49,7 +49,6 @@ auth_test_() ->
             fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1,
             [
                 make_test_cases(clustered, Tests),
-                make_test_cases(backdoor, Tests),
                 make_require_valid_user_test_cases(clustered, RequireValidUserTests)
             ]
         }
@@ -86,12 +85,6 @@ should_not_return_authenticated_field(_PortType, Url) ->
                 <<"info">>, <<"authenticated">>])
         end).
 
-should_return_list_of_handlers(backdoor, Url) ->
-    ?_assertEqual([<<"cookie">>,<<"default">>],
-        begin
-            couch_util:get_nested_json_value(session(Url), [
-                <<"info">>, <<"authentication_handlers">>])
-        end);
 should_return_list_of_handlers(clustered, Url) ->
     ?_assertEqual([<<"cookie">>,<<"default">>],
         begin
@@ -110,6 +103,4 @@ session(Url) ->
     jiffy:decode(Body).
 
 port(clustered) ->
-    integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
-    integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
+    integer_to_list(mochiweb_socket_server:get(chttpd, port)).
diff --git a/src/couch/test/eunit/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl
index 82630bb..0e0926c 100644
--- a/src/couch/test/eunit/couchdb_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_cors_tests.erl
@@ -26,21 +26,20 @@
     ?assertEqual(lists:usort(A), lists:usort(B))).
 
 start() ->
-    Ctx = test_util:start_couch([ioq]),
+    Ctx = test_util:start_couch([chttpd]),
     ok = config:set("httpd", "enable_cors", "true", false),
     ok = config:set("vhosts", "example.com", "/", false),
     Ctx.
 
 setup() ->
     DbName = ?tempdb(),
-    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
-    couch_db:close(Db),
+    {ok, _} = fabric2_db:create(DbName, [?ADMIN_CTX]),
 
     config:set("cors", "credentials", "false", false),
     config:set("cors", "origins", "http://example.com", false),
 
     Addr = config:get("httpd", "bind_address", "127.0.0.1"),
-    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+    Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
     Host = "http://" ++ Addr ++ ":" ++ Port,
     {Host, ?b2l(DbName)}.
 
@@ -57,7 +56,7 @@ setup({Mod, VHost}) ->
     {Host, DbName, Url, DefaultHeaders}.
 
 teardown(DbName) when is_list(DbName) ->
-    ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
+    ok = fabric2_db:delete(?l2b(DbName), [?ADMIN_CTX]),
     ok;
 teardown({_, DbName}) ->
     teardown(DbName).
diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
index 0f69048..a9215f5 100644
--- a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
@@ -70,8 +70,7 @@ show_tests() ->
     {
         "Check CORS for show",
         [
-            make_test_case(clustered, [fun should_make_shows_request/2]),
-            make_test_case(backdoor, [fun should_make_shows_request/2])
+            make_test_case(clustered, [fun should_make_shows_request/2])
         ]
     }.
 
@@ -86,22 +85,14 @@ should_make_shows_request(_, {Host, DbName}) ->
          ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar",
          Headers = [{"Origin", "http://example.com"},
                     {"Access-Control-Request-Method", "GET"}, ?AUTH],
-         {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers),
-         Origin = proplists:get_value("Access-Control-Allow-Origin", Resp),
-         ?assertEqual("http://example.com", Origin),
-         ?assertEqual(<<"<h1>wosh</h1>">>, Body)
+         ?assertMatch({ok, 410, _, _}, test_request:get(ReqUrl, Headers))
     end).
 
-create_db(backdoor, DbName) ->
-    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
-    couch_db:close(Db);
 create_db(clustered, DbName) ->
     {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""),
     assert_success(create_db, Status),
     ok.
 
-delete_db(backdoor, DbName) ->
-    couch_server:delete(DbName, [?ADMIN_CTX]);
 delete_db(clustered, DbName) ->
     {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]),
     assert_success(delete_db, Status),
@@ -119,7 +110,6 @@ host_url(PortType) ->
 bind_address(PortType) ->
     config:get(section(PortType), "bind_address", "127.0.0.1").
 
-section(backdoor) -> "http";
 section(clustered) -> "chttpd".
 
 db_url(DbName) when is_binary(DbName) ->
@@ -128,9 +118,7 @@ db_url(DbName) when is_list(DbName) ->
     host_url(clustered) ++ "/" ++ DbName.
 
 port(clustered) ->
-    integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
-    integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
+    integer_to_list(mochiweb_socket_server:get(chttpd, port)).
 
 
 upload_ddoc(Host, DbName) ->
diff --git a/src/couch_views/test/couch_views_batch_test.erl b/src/couch_views/test/couch_views_batch_test.erl
index 78e6892..d4dbb50 100644
--- a/src/couch_views/test/couch_views_batch_test.erl
+++ b/src/couch_views/test/couch_views_batch_test.erl
@@ -15,7 +15,7 @@
 
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 
 batch_test_() ->
diff --git a/src/couch_views/test/couch_views_cleanup_test.erl b/src/couch_views/test/couch_views_cleanup_test.erl
index 54048c9..d1b6f2a 100644
--- a/src/couch_views/test/couch_views_cleanup_test.erl
+++ b/src/couch_views/test/couch_views_cleanup_test.erl
@@ -17,7 +17,6 @@
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("couch_views/include/couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("fabric/include/fabric2.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
 
diff --git a/src/couch_views/test/couch_views_custom_red_test.erl b/src/couch_views/test/couch_views_custom_red_test.erl
index 911846d..e8f8cbc 100644
--- a/src/couch_views/test/couch_views_custom_red_test.erl
+++ b/src/couch_views/test/couch_views_custom_red_test.erl
@@ -14,7 +14,6 @@
 
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
 -include("couch_views.hrl").
 
diff --git a/src/couch_views/test/couch_views_indexer_test.erl b/src/couch_views/test/couch_views_indexer_test.erl
index 6918646..c41db3b 100644
--- a/src/couch_views/test/couch_views_indexer_test.erl
+++ b/src/couch_views/test/couch_views_indexer_test.erl
@@ -15,7 +15,6 @@
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("couch_views/include/couch_views.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
 
diff --git a/src/couch_views/test/couch_views_info_test.erl b/src/couch_views/test/couch_views_info_test.erl
index 993801a..18a0a63 100644
--- a/src/couch_views/test/couch_views_info_test.erl
+++ b/src/couch_views/test/couch_views_info_test.erl
@@ -15,7 +15,7 @@
 
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
 
 
diff --git a/src/couch_views/test/couch_views_map_test.erl b/src/couch_views/test/couch_views_map_test.erl
index 125b43d..97e35cc 100644
--- a/src/couch_views/test/couch_views_map_test.erl
+++ b/src/couch_views/test/couch_views_map_test.erl
@@ -14,7 +14,6 @@
 
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include("couch_views.hrl").
 
 
@@ -567,27 +566,6 @@ check_local_seq(Val, Expected) ->
     ?assertEqual(Expected, Result).
 
 
-% should_give_ext_size_seq_indexed_test(Db) ->
-%     DDoc = couch_doc:from_json_obj({[
-%         {<<"_id">>, <<"_design/seqdoc">>},
-%         {<<"options">>, {[{<<"seq_indexed">>, true}]}},
-%         {<<"views">>, {[
-%                 {<<"view1">>, {[
-%                     {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
-%                 ]}}
-%             ]}
-%         }
-%     ]}),
-%     {ok, _} = couch_db:update_doc(Db, DDoc, []),
-%     {ok, Db1} = couch_db:open_int(couch_db:name(Db), []),
-%     {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]),
-%     couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]),
-%     {ok, Info} = couch_mrview:get_info(Db1, DDoc),
-%     Size = couch_util:get_nested_json_value({Info}, [sizes, external]),
-%     ok = couch_db:close(Db1),
-%     ?assert(is_number(Size)).
-
-
 run_query(Idx, Args) ->
     run_query(Idx, Args, false).
 
diff --git a/src/couch_views/test/couch_views_size_test.erl b/src/couch_views/test/couch_views_size_test.erl
index e69b5b2..91684a9 100644
--- a/src/couch_views/test/couch_views_size_test.erl
+++ b/src/couch_views/test/couch_views_size_test.erl
@@ -15,7 +15,6 @@
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("couch_views/include/couch_views.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
 
@@ -353,4 +352,4 @@ fold_fun({meta, _Meta}, Acc) ->
 fold_fun({row, _} = Row, Acc) ->
     {ok, [Row | Acc]};
 fold_fun(complete, Acc) ->
-    {ok, lists:reverse(Acc)}.
\ No newline at end of file
+    {ok, lists:reverse(Acc)}.
diff --git a/src/couch_views/test/couch_views_trace_index_test.erl b/src/couch_views/test/couch_views_trace_index_test.erl
index 03c21a3..346a99c 100644
--- a/src/couch_views/test/couch_views_trace_index_test.erl
+++ b/src/couch_views/test/couch_views_trace_index_test.erl
@@ -17,7 +17,7 @@
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 
 % Steps for this to work
diff --git a/src/couch_views/test/couch_views_updater_test.erl b/src/couch_views/test/couch_views_updater_test.erl
index aadbe94..1bd637d 100644
--- a/src/couch_views/test/couch_views_updater_test.erl
+++ b/src/couch_views/test/couch_views_updater_test.erl
@@ -14,10 +14,9 @@
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
 -include_lib("mango/src/mango_idx.hrl").
--include_lib("couch_views/include/couch_views.hrl").
 
 
 indexer_test_() ->
diff --git a/src/couch_views/test/couch_views_upgrade_test.erl b/src/couch_views/test/couch_views_upgrade_test.erl
index 556a762..3926db3 100644
--- a/src/couch_views/test/couch_views_upgrade_test.erl
+++ b/src/couch_views/test/couch_views_upgrade_test.erl
@@ -15,7 +15,6 @@
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("couch_views/include/couch_views.hrl").
 -include_lib("fabric/include/fabric2.hrl").
 -include_lib("fabric/test/fabric2_test.hrl").
@@ -397,4 +396,4 @@ doc(Id, Val) ->
 
 
 run_query(#{} = Db, DDoc, <<_/binary>> = View) ->
-    couch_views:query(Db, DDoc, View, fun fold_fun/2, [], #mrargs{}).
\ No newline at end of file
+    couch_views:query(Db, DDoc, View, fun fold_fun/2, [], #mrargs{}).
diff --git a/src/fabric/test/fabric2_dir_prefix_tests.erl b/src/fabric/test/fabric2_dir_prefix_tests.erl
index 2943d65..8eacfaf 100644
--- a/src/fabric/test/fabric2_dir_prefix_tests.erl
+++ b/src/fabric/test/fabric2_dir_prefix_tests.erl
@@ -23,10 +23,10 @@ dir_prefix_test_() ->
         "Test couchdb fdb directory prefix",
         setup,
         fun() ->
-            % erlfdb, rexi and mem3 are all dependent apps for fabric. We make
+            % erlfdb and ctrace are dependent apps for fabric. We make
             % sure to start them so when fabric is started during the test it
             % already has its dependencies
-            test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+            test_util:start_couch([erlfdb, ctrace, fabric])
         end,
         fun(Ctx) ->
             config:delete("fabric", "fdb_directory"),
diff --git a/src/fabric/test/fabric2_node_types_tests.erl b/src/fabric/test/fabric2_node_types_tests.erl
index 074afe8..98bfd7e 100644
--- a/src/fabric/test/fabric2_node_types_tests.erl
+++ b/src/fabric/test/fabric2_node_types_tests.erl
@@ -25,10 +25,10 @@ node_types_test_() ->
             os:putenv("COUCHDB_NODE_TYPE_FOO", "false"),
             os:putenv("COUCHDB_NODE_TYPE_BAZ", "true"),
             os:putenv("COUCHDB_NODE_TYPE_ZIG", ""),
-            % erlfdb, rexi and mem3 are all dependent apps for fabric. We make
+            % erlfdb and ctrace are dependent apps for fabric. We make
             % sure to start them so when fabric is started during the test it
             % already has its dependencies
-            test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+            test_util:start_couch([erlfdb, ctrace, fabric])
         end,
         fun(Ctx) ->
             test_util:stop_couch(Ctx),
diff --git a/src/fabric/test/fabric2_tx_options_tests.erl b/src/fabric/test/fabric2_tx_options_tests.erl
index 78428c6..b326464 100644
--- a/src/fabric/test/fabric2_tx_options_tests.erl
+++ b/src/fabric/test/fabric2_tx_options_tests.erl
@@ -26,10 +26,10 @@ fdb_tx_options_test_() ->
         setup,
         fun() ->
             meck:new(erlfdb, [passthrough]),
-            % erlfdb, rexi and mem3 are all dependent apps for fabric. We make
+            % erlfdb and ctrace are dependent apps for fabric. We make
             % sure to start them so when fabric is started during the test it
             % already has its dependencies
-            test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+            test_util:start_couch([erlfdb, ctrace, fabric])
         end,
         fun(Ctx) ->
             meck:unload(),
diff --git a/test/elixir/lib/step/create_db.ex b/test/elixir/lib/step/create_db.ex
index d38e672..412c858 100644
--- a/test/elixir/lib/step/create_db.ex
+++ b/test/elixir/lib/step/create_db.ex
@@ -36,7 +36,7 @@ defmodule Couch.Test.Setup.Step.Create.DB do
   def setup(setup, %__MODULE__{name: name} = step) do
     assert Setup.completed?(setup, Step.Start), "Require `Start` step"
     assert :fabric in Step.Start.apps(), "Fabric is not started"
-    res = :fabric.create_db(name, [@admin])
+    res = :fabric2_db.create_db(name, [@admin])
     assert res in [:ok, :accepted], "Cannot create `#{name}` database"
     step
   end

[couchdb] 17/24: Move utilities and records from couch_mrview and couch_index to couch_views

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 5ec21191ad2956d393132e6fc3f11711e62d8d2c
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 02:12:40 2021 -0400

    Move utilities and records from couch_mrview and couch_index to couch_views
    
     * `couch_mrview_util` functions ended up mostly in `couch_views_util`.
    
     * `couch_mrview` validation functions ended up in the
       `couch_views_validate` module.
    
     * `couch_mrview_http` functions moved to `couch_views_http_util`. They did
       not end up in `couch_views_http` because many of the functions there
       share the exact same names as the ones in `couch_views_http_util`. There
       is quite a bit of duplication involved, but that is left for a future
       refactoring. The general flow of control goes from chttpd ->
       couch_views_http -> couch_views_http_util.
    
    Most of the changes are just copy and paste, with the exception of the
    `ddoc_to_mrst/2` function. Previously, there were two almost identical copies
    -- one in `couch_mrview_util` and another in `couch_views_util` -- used by
    different parts of the code. The difference was that the couch_views one
    optionally disabled reduce functions, replacing their body with the
    `disabled` atom, while the one in `couch_mrview` didn't. Unifying them so
    that only the `couch_views` one is used resulted in the inability to write
    design documents on servers which have custom reduce disabled. That may be
    the better behavior, but it should be made in a separate PR, possibly after
    a mailing list discussion. So, in order to preserve the existing behavior,
    couch_eval was updated to not fail in `try_compile` when a reduce function
    is disabled.
    
    Patches to the rest of the code, updating the include paths and using the
    new utility functions, will follow in a separate commit.
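    
    As a quick illustration of the couch_eval change (a sketch only, not part
    of the patch below; the wrapper function, the `<<"by_count">>` view name
    and the language value are made up), a reduce body replaced with the
    `disabled` atom now compiles cleanly, while a real source string is still
    handed to the configured language backend:
    
        check_disabled_reduce() ->
            couch_eval:with_context(#{language => <<"javascript">>}, fun(Ctx) ->
                % The new clause accepts a disabled reduce body as valid
                ok = couch_eval:try_compile(Ctx, reduce, <<"by_count">>, disabled),
                % Any other source still goes through ApiMod:try_compile/4
                couch_eval:try_compile(Ctx, reduce, <<"by_count">>,
                    <<"function(keys, values) { return sum(values); }">>)
            end).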
---
 src/couch_eval/src/couch_eval.erl             |   6 +-
 src/couch_views/include/couch_views.hrl       |  94 ++++++
 src/couch_views/src/couch_views_http_util.erl | 358 ++++++++++++++++++++
 src/couch_views/src/couch_views_util.erl      | 105 +++++-
 src/couch_views/src/couch_views_validate.erl  | 460 ++++++++++++++++++++++++++
 5 files changed, 1018 insertions(+), 5 deletions(-)

diff --git a/src/couch_eval/src/couch_eval.erl b/src/couch_eval/src/couch_eval.erl
index a6e5965..f87ba97 100644
--- a/src/couch_eval/src/couch_eval.erl
+++ b/src/couch_eval/src/couch_eval.erl
@@ -37,7 +37,7 @@
 -type result() :: {doc_id(), [[{any(), any()}]]}.
 -type api_mod() :: atom().
 -type context() :: {api_mod(), any()}.
--type function_type() :: binary().
+-type function_type() :: binary() | atom().
 -type function_name() :: binary().
 -type function_src() :: binary().
 -type error(_Error) :: no_return().
@@ -117,6 +117,10 @@ with_context(#{language := Language}, Fun) ->
 
 
 -spec try_compile(context(), function_type(), function_name(), function_src()) -> ok.
+try_compile({_ApiMod, _Ctx}, reduce, <<_/binary>>, disabled) ->
+    % Reduce functions may be disabled. Accept that as a valid configuration.
+    ok;
+
 try_compile({ApiMod, Ctx}, FuncType, FuncName, FuncSrc) -> 
     ApiMod:try_compile(Ctx, FuncType, FuncName, FuncSrc).
 
diff --git a/src/couch_views/include/couch_views.hrl b/src/couch_views/include/couch_views.hrl
index e28fa74..86f73a3 100644
--- a/src/couch_views/include/couch_views.hrl
+++ b/src/couch_views/include/couch_views.hrl
@@ -45,3 +45,97 @@
 % be used. Use `null` so it can be round-tripped through json serialization
 % with couch_jobs.
 -define(VIEW_CURRENT_VSN, null).
+
+
+-record(mrst, {
+    sig=nil,
+    fd=nil,
+    fd_monitor,
+    db_name,
+    idx_name,
+    language,
+    design_opts=[],
+    partitioned=false,
+    lib,
+    views,
+    id_btree=nil,
+    update_seq=0,
+    purge_seq=0,
+    first_build,
+    partial_resp_pid,
+    doc_acc,
+    doc_queue,
+    write_queue,
+    qserver=nil
+}).
+
+
+-record(mrview, {
+    id_num,
+    update_seq=0,
+    purge_seq=0,
+    map_names=[],
+    reduce_funs=[],
+    def,
+    btree=nil,
+    options=[]
+}).
+
+
+-define(MAX_VIEW_LIMIT, 16#10000000).
+
+-record(mrargs, {
+    view_type,
+    reduce,
+
+    preflight_fun,
+
+    start_key,
+    start_key_docid,
+    end_key,
+    end_key_docid,
+    keys,
+
+    direction = fwd,
+    limit = ?MAX_VIEW_LIMIT,
+    skip = 0,
+    group_level = 0,
+    group = undefined,
+    stable = false,
+    update = true,
+    multi_get = false,
+    inclusive_end = true,
+    include_docs = false,
+    doc_options = [],
+    update_seq=false,
+    conflicts,
+    callback,
+    sorted = true,
+    extra = [],
+    page_size = undefined,
+    bookmark=nil
+}).
+
+-record(vacc, {
+    db,
+    req,
+    resp,
+    prepend,
+    etag,
+    should_close = false,
+    buffer = [],
+    bufsize = 0,
+    threshold = 1490,
+    row_sent = false,
+    meta_sent = false,
+    paginated = false,
+    meta = #{}
+}).
+
+
+-record(view_row, {
+    key,
+    id,
+    value,
+    doc
+}).
diff --git a/src/couch_views/src/couch_views_http_util.erl b/src/couch_views/src/couch_views_http_util.erl
new file mode 100644
index 0000000..7af0726
--- /dev/null
+++ b/src/couch_views/src/couch_views_http_util.erl
@@ -0,0 +1,358 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% The reason this module and couch_views_http both exist is that they have
+% functions which are named the same but do slightly different things. The
+% general pattern is that chttpd code calls into couch_views_http, and those
+% functions in turn call into this module.
+
+-module(couch_views_http_util).
+
+-export([
+    prepend_val/1,
+    parse_body_and_query/2,
+    parse_body_and_query/3,
+    parse_params/2,
+    parse_params/3,
+    parse_params/4,
+    view_cb/2,
+    row_to_obj/1,
+    row_to_obj/2,
+    row_to_json/1,
+    row_to_json/2
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+
+%% these clauses start (and possibly end) the response
+view_cb({error, Reason}, #vacc{resp=undefined}=Acc) ->
+    {ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
+    {ok, Acc#vacc{resp=Resp}};
+
+view_cb(complete, #vacc{resp=undefined}=Acc) ->
+    % Nothing in view
+    {ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
+    {ok, Acc#vacc{resp=Resp}};
+
+view_cb(Msg, #vacc{resp=undefined}=Acc) ->
+    %% Start response
+    Headers = [],
+    {ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
+    view_cb(Msg, Acc#vacc{resp=Resp, should_close=true});
+
+%% ---------------------------------------------------
+
+%% From here on down, the response has been started.
+
+view_cb({error, Reason}, #vacc{resp=Resp}=Acc) ->
+    {ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
+    {ok, Acc#vacc{resp=Resp1}};
+
+view_cb(complete, #vacc{resp=Resp, buffer=Buf, threshold=Max}=Acc) ->
+    % Finish view output and possibly end the response
+    {ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
+    case Acc#vacc.should_close of
+        true ->
+            {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
+            {ok, Acc#vacc{resp=Resp2}};
+        _ ->
+            {ok, Acc#vacc{resp=Resp1, meta_sent=false, row_sent=false,
+                prepend=",\r\n", buffer=[], bufsize=0}}
+    end;
+
+view_cb({meta, Meta}, #vacc{meta_sent=false, row_sent=false}=Acc) ->
+    % Sending metadata as we've not sent it or any row yet
+    Parts = case couch_util:get_value(total, Meta) of
+        undefined -> [];
+        Total -> [io_lib:format("\"total_rows\":~p", [Total])]
+    end ++ case couch_util:get_value(offset, Meta) of
+        undefined -> [];
+        Offset -> [io_lib:format("\"offset\":~p", [Offset])]
+    end ++ case couch_util:get_value(update_seq, Meta) of
+        undefined -> [];
+        null ->
+            ["\"update_seq\":null"];
+        UpdateSeq when is_integer(UpdateSeq) ->
+            [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
+        UpdateSeq when is_binary(UpdateSeq) ->
+            [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
+    end ++ ["\"rows\":["],
+    Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
+    {ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
+    {ok, AccOut#vacc{prepend="", meta_sent=true}};
+
+view_cb({meta, _Meta}, #vacc{}=Acc) ->
+    %% ignore metadata
+    {ok, Acc};
+
+view_cb({row, Row}, #vacc{meta_sent=false}=Acc) ->
+    %% sorted=false and row arrived before meta
+    % Adding another row
+    Chunk = [prepend_val(Acc), "{\"rows\":[\r\n", row_to_json(Row)],
+    maybe_flush_response(Acc#vacc{meta_sent=true, row_sent=true}, Chunk, iolist_size(Chunk));
+
+view_cb({row, Row}, #vacc{meta_sent=true}=Acc) ->
+    % Adding another row
+    Chunk = [prepend_val(Acc), row_to_json(Row)],
+    maybe_flush_response(Acc#vacc{row_sent=true}, Chunk, iolist_size(Chunk)).
+
+
+maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
+        when Size > 0 andalso (Size + Len) > Max ->
+    #vacc{buffer = Buffer, resp = Resp} = Acc,
+    {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
+    {ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
+maybe_flush_response(Acc0, Data, Len) ->
+    #vacc{buffer = Buf, bufsize = Size} = Acc0,
+    Acc = Acc0#vacc{
+        prepend = ",\r\n",
+        buffer = [Buf | Data],
+        bufsize = Size + Len
+    },
+    {ok, Acc}.
+
+prepend_val(#vacc{prepend=Prepend}) ->
+    case Prepend of
+        undefined ->
+            "";
+        _ ->
+            Prepend
+    end.
+
+
+row_to_json(Row) ->
+    ?JSON_ENCODE(row_to_obj(Row)).
+
+
+row_to_json(Kind, Row) ->
+    ?JSON_ENCODE(row_to_obj(Kind, Row)).
+
+
+row_to_obj(Row) ->
+    Id = couch_util:get_value(id, Row),
+    row_to_obj(Id, Row).
+
+
+row_to_obj(error, Row) ->
+    % Special case for _all_docs request with KEYS to
+    % match prior behavior.
+    Key = couch_util:get_value(key, Row),
+    Val = couch_util:get_value(value, Row),
+    Reason = couch_util:get_value(reason, Row),
+    ReasonProp = if Reason == undefined -> []; true ->
+        [{reason, Reason}]
+    end,
+    {[{key, Key}, {error, Val}] ++ ReasonProp};
+row_to_obj(Id0, Row) ->
+    Id = case Id0 of
+        undefined -> [];
+        Id0 -> [{id, Id0}]
+    end,
+    Key = couch_util:get_value(key, Row, null),
+    Val = couch_util:get_value(value, Row),
+    Doc = case couch_util:get_value(doc, Row) of
+        undefined -> [];
+        Doc0 -> [{doc, Doc0}]
+    end,
+    {Id ++ [{key, Key}, {value, Val}] ++ Doc}.
+
+
+parse_params(#httpd{}=Req, Keys) ->
+    parse_params(chttpd:qs(Req), Keys);
+parse_params(Props, Keys) ->
+    Args = #mrargs{},
+    parse_params(Props, Keys, Args).
+
+
+parse_params(Props, Keys, Args) ->
+    parse_params(Props, Keys, Args, []).
+
+parse_params(Props, Keys, #mrargs{}=Args0, Options) ->
+    IsDecoded = lists:member(decoded, Options),
+    Args1 = case lists:member(keep_group_level, Options) of
+        true ->
+            Args0;
+        _ ->
+            % group_level set to undefined to detect if explicitly set by user
+            Args0#mrargs{keys=Keys, group=undefined, group_level=undefined}
+    end,
+    lists:foldl(fun({K, V}, Acc) ->
+        parse_param(K, V, Acc, IsDecoded)
+    end, Args1, Props).
+
+
+parse_body_and_query(#httpd{method='POST'} = Req, Keys) ->
+    Props = chttpd:json_body_obj(Req),
+    parse_body_and_query(Req, Props, Keys);
+
+parse_body_and_query(Req, Keys) ->
+    parse_params(chttpd:qs(Req), Keys, #mrargs{keys=Keys, group=undefined,
+        group_level=undefined}, [keep_group_level]).
+
+parse_body_and_query(Req, {Props}, Keys) ->
+    Args = #mrargs{keys=Keys, group=undefined, group_level=undefined},
+    BodyArgs = parse_params(Props, Keys, Args, [decoded]),
+    parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
+
+parse_param(Key, Val, Args, IsDecoded) when is_binary(Key) ->
+    parse_param(binary_to_list(Key), Val, Args, IsDecoded);
+parse_param(Key, Val, Args, IsDecoded) ->
+    case Key of
+        "" ->
+            Args;
+        "reduce" ->
+            Args#mrargs{reduce=parse_boolean(Val)};
+        "key" when IsDecoded ->
+            Args#mrargs{start_key=Val, end_key=Val};
+        "key" ->
+            JsonKey = ?JSON_DECODE(Val),
+            Args#mrargs{start_key=JsonKey, end_key=JsonKey};
+        "keys" when IsDecoded ->
+            Args#mrargs{keys=Val};
+        "keys" ->
+            Args#mrargs{keys=?JSON_DECODE(Val)};
+        "startkey" when IsDecoded ->
+            Args#mrargs{start_key=Val};
+        "start_key" when IsDecoded ->
+            Args#mrargs{start_key=Val};
+        "startkey" ->
+            Args#mrargs{start_key=?JSON_DECODE(Val)};
+        "start_key" ->
+            Args#mrargs{start_key=?JSON_DECODE(Val)};
+        "startkey_docid" ->
+            Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+        "start_key_doc_id" ->
+            Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+        "endkey" when IsDecoded ->
+            Args#mrargs{end_key=Val};
+        "end_key" when IsDecoded ->
+            Args#mrargs{end_key=Val};
+        "endkey" ->
+            Args#mrargs{end_key=?JSON_DECODE(Val)};
+        "end_key" ->
+            Args#mrargs{end_key=?JSON_DECODE(Val)};
+        "endkey_docid" ->
+            Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+        "end_key_doc_id" ->
+            Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+        "limit" ->
+            Args#mrargs{limit=parse_pos_int(Val)};
+        "page_size" ->
+            Args#mrargs{page_size=parse_pos_int(Val)};
+        "stale" when Val == "ok" orelse Val == <<"ok">> ->
+            Args#mrargs{stable=true, update=false};
+        "stale" when Val == "update_after" orelse Val == <<"update_after">> ->
+            Args#mrargs{stable=true, update=lazy};
+        "stale" ->
+            throw({query_parse_error, <<"Invalid value for `stale`.">>});
+        "stable" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
+            Args#mrargs{stable=true};
+        "stable" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
+            Args#mrargs{stable=false};
+        "stable" ->
+            throw({query_parse_error, <<"Invalid value for `stable`.">>});
+        "update" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
+            Args#mrargs{update=true};
+        "update" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
+            Args#mrargs{update=false};
+        "update" when Val == "lazy" orelse Val == <<"lazy">> ->
+            Args#mrargs{update=lazy};
+        "update" ->
+            throw({query_parse_error, <<"Invalid value for `update`.">>});
+        "descending" ->
+            case parse_boolean(Val) of
+                true -> Args#mrargs{direction=rev};
+                _ -> Args#mrargs{direction=fwd}
+            end;
+        "skip" ->
+            Args#mrargs{skip=parse_pos_int(Val)};
+        "group" ->
+            Args#mrargs{group=parse_boolean(Val)};
+        "group_level" ->
+            Args#mrargs{group_level=parse_pos_int(Val)};
+        "inclusive_end" ->
+            Args#mrargs{inclusive_end=parse_boolean(Val)};
+        "include_docs" ->
+            Args#mrargs{include_docs=parse_boolean(Val)};
+        "attachments" ->
+            case parse_boolean(Val) of
+            true ->
+                Opts = Args#mrargs.doc_options,
+                Args#mrargs{doc_options=[attachments|Opts]};
+            false ->
+                Args
+            end;
+        "att_encoding_info" ->
+            case parse_boolean(Val) of
+            true ->
+                Opts = Args#mrargs.doc_options,
+                Args#mrargs{doc_options=[att_encoding_info|Opts]};
+            false ->
+                Args
+            end;
+        "update_seq" ->
+            Args#mrargs{update_seq=parse_boolean(Val)};
+        "conflicts" ->
+            Args#mrargs{conflicts=parse_boolean(Val)};
+        "callback" ->
+            Args#mrargs{callback=couch_util:to_binary(Val)};
+        "sorted" ->
+            Args#mrargs{sorted=parse_boolean(Val)};
+        "partition" ->
+            Partition = couch_util:to_binary(Val),
+            couch_partition:validate_partition(Partition),
+            couch_views_util:set_extra(Args, partition, Partition);
+        _ ->
+            BKey = couch_util:to_binary(Key),
+            BVal = couch_util:to_binary(Val),
+            Args#mrargs{extra=[{BKey, BVal} | Args#mrargs.extra]}
+    end.
+
+
+parse_boolean(true) ->
+    true;
+parse_boolean(false) ->
+    false;
+
+parse_boolean(Val) when is_binary(Val) ->
+    parse_boolean(?b2l(Val));
+
+parse_boolean(Val) ->
+    case string:to_lower(Val) of
+    "true" -> true;
+    "false" -> false;
+    _ ->
+        Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
+        throw({query_parse_error, ?l2b(Msg)})
+    end.
+
+parse_int(Val) when is_integer(Val) ->
+    Val;
+parse_int(Val) ->
+    case (catch list_to_integer(Val)) of
+    IntVal when is_integer(IntVal) ->
+        IntVal;
+    _ ->
+        Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
+        throw({query_parse_error, ?l2b(Msg)})
+    end.
+
+parse_pos_int(Val) ->
+    case parse_int(Val) of
+    IntVal when IntVal >= 0 ->
+        IntVal;
+    _ ->
+        Fmt = "Invalid value for positive integer: ~p",
+        Msg = io_lib:format(Fmt, [Val]),
+        throw({query_parse_error, ?l2b(Msg)})
+    end.
diff --git a/src/couch_views/src/couch_views_util.erl b/src/couch_views/src/couch_views_util.erl
index 7040020..287d4ba 100644
--- a/src/couch_views/src/couch_views_util.erl
+++ b/src/couch_views/src/couch_views_util.erl
@@ -19,12 +19,16 @@
     validate_args/1,
     validate_args/2,
     is_paginated/1,
-    active_tasks_info/5
+    active_tasks_info/5,
+    set_view_type/3,
+    set_extra/3,
+    get_view_queries/1,
+    get_view_keys/1,
+    extract_view/4
 ]).
 
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include("couch_views.hrl").
 
 
@@ -80,10 +84,53 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
         design_opts=DesignOpts,
         partitioned=Partitioned
     },
-    SigInfo = {Views1, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
+    SigInfo = {Views1, Language, DesignOpts, sort_lib(Lib)},
     {ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
 
 
+set_view_type(_Args, _ViewName, []) ->
+    throw({not_found, missing_named_view});
+
+set_view_type(Args, ViewName, [View | Rest]) ->
+    RedNames = [N || {N, _} <- View#mrview.reduce_funs],
+    case lists:member(ViewName, RedNames) of
+        true ->
+            case Args#mrargs.reduce of
+                false -> Args#mrargs{view_type=map};
+                _ -> Args#mrargs{view_type=red}
+            end;
+        false ->
+            case lists:member(ViewName, View#mrview.map_names) of
+                true -> Args#mrargs{view_type=map};
+                false -> set_view_type(Args, ViewName, Rest)
+            end
+    end.
+
+
+set_extra(#mrargs{} = Args, Key, Value) ->
+    Extra0 = Args#mrargs.extra,
+    Extra1 = lists:ukeysort(1, [{Key, Value} | Extra0]),
+    Args#mrargs{extra = Extra1}.
+
+
+extract_view(_Lang, _Args, _ViewName, []) ->
+    throw({not_found, missing_named_view});
+
+extract_view(Lang, #mrargs{view_type=map}=Args, Name, [View | Rest]) ->
+    Names = View#mrview.map_names ++ [N || {N, _} <- View#mrview.reduce_funs],
+    case lists:member(Name, Names) of
+        true -> {map, View, Args};
+        _ -> extract_view(Lang, Args, Name, Rest)
+    end;
+
+extract_view(Lang, #mrargs{view_type=red}=Args, Name, [View | Rest]) ->
+    RedNames = [N || {N, _} <- View#mrview.reduce_funs],
+    case lists:member(Name, RedNames) of
+        true -> {red, {index_of(Name, RedNames), Lang, View}, Args};
+        false -> extract_view(Lang, Args, Name, Rest)
+    end.
+
+
 collate_fun(View) ->
     #mrview{
         options = Options
@@ -122,7 +169,7 @@ validate_args(Args) ->
     validate_args(Args, []).
 
 
-% This is mostly a copy of couch_mrview_util:validate_args/1 but it doesn't
+% This is mostly a copy of couch_views_validate:validate_args/1 but it doesn't
 % update start / end keys and also throws a not_implemented error for reduce
 %
 validate_args(#mrargs{} = Args, Opts) ->
@@ -366,3 +413,53 @@ convert_seq_to_stamp(Seq) ->
     VS = integer_to_list(Stamp) ++ "-" ++ integer_to_list(Batch) ++ "-"
             ++ integer_to_list(DocNumber),
     list_to_binary(VS).
+
+
+get_view_queries({Props}) ->
+    case couch_util:get_value(<<"queries">>, Props) of
+        undefined ->
+            undefined;
+        Queries when is_list(Queries) ->
+            Queries;
+        _ ->
+            throw({bad_request, "`queries` member must be an array."})
+    end.
+
+
+get_view_keys({Props}) ->
+    case couch_util:get_value(<<"keys">>, Props) of
+        undefined ->
+            undefined;
+        Keys when is_list(Keys) ->
+            Keys;
+        _ ->
+            throw({bad_request, "`keys` member must be an array."})
+    end.
+
+
+sort_lib({Lib}) ->
+    sort_lib(Lib, []).
+
+sort_lib([], LAcc) ->
+    lists:keysort(1, LAcc);
+
+sort_lib([{LName, {LObj}}|Rest], LAcc) ->
+    LSorted = sort_lib(LObj, []), % descend into nested object
+    sort_lib(Rest, [{LName, LSorted}|LAcc]);
+
+sort_lib([{LName, LCode}|Rest], LAcc) ->
+    sort_lib(Rest, [{LName, LCode}|LAcc]).
+
+
+index_of(Key, List) ->
+    index_of(Key, List, 1).
+
+
+index_of(_, [], _) ->
+    throw({error, missing_named_view});
+
+index_of(Key, [Key | _], Idx) ->
+    Idx;
+
+index_of(Key, [_ | Rest], Idx) ->
+    index_of(Key, Rest, Idx+1).
diff --git a/src/couch_views/src/couch_views_validate.erl b/src/couch_views/src/couch_views_validate.erl
new file mode 100644
index 0000000..558f65d
--- /dev/null
+++ b/src/couch_views/src/couch_views_validate.erl
@@ -0,0 +1,460 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_validate).
+
+
+-export([
+    validate_args/1,
+    validate_args/3,
+    validate_ddoc/2
+]).
+
+
+-define(LOWEST_KEY, null).
+-define(HIGHEST_KEY, {<<255, 255, 255, 255>>}).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("couch_views.hrl").
+
+
+% There is another almost identical validate_args in couch_views_util. They
+% should probably be merged at some point in the future.
+%
+validate_args(Args) ->
+    GroupLevel = determine_group_level(Args),
+    Reduce = Args#mrargs.reduce,
+    case Reduce == undefined orelse is_boolean(Reduce) of
+        true -> ok;
+        _ -> mrverror(<<"Invalid `reduce` value.">>)
+    end,
+
+    case {Args#mrargs.view_type, Reduce} of
+        {map, true} -> mrverror(<<"Reduce is invalid for map-only views.">>);
+        _ -> ok
+    end,
+
+    case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
+        {red, exact, _} -> ok;
+        {red, _, KeyList} when is_list(KeyList) ->
+            Msg = <<"Multi-key fetches for reduce views must use `group=true`">>,
+            mrverror(Msg);
+        _ -> ok
+    end,
+
+    case Args#mrargs.keys of
+        Keys when is_list(Keys) -> ok;
+        undefined -> ok;
+        _ -> mrverror(<<"`keys` must be an array of strings.">>)
+    end,
+
+    case {Args#mrargs.keys, Args#mrargs.start_key,
+          Args#mrargs.end_key} of
+        {undefined, _, _} -> ok;
+        {[], _, _} -> ok;
+        {[_|_], undefined, undefined} -> ok;
+        _ -> mrverror(<<"`keys` is incompatible with `key`"
+                        ", `start_key` and `end_key`">>)
+    end,
+
+    case Args#mrargs.start_key_docid of
+        undefined -> ok;
+        SKDocId0 when is_binary(SKDocId0) -> ok;
+        _ -> mrverror(<<"`start_key_docid` must be a string.">>)
+    end,
+
+    case Args#mrargs.end_key_docid of
+        undefined -> ok;
+        EKDocId0 when is_binary(EKDocId0) -> ok;
+        _ -> mrverror(<<"`end_key_docid` must be a string.">>)
+    end,
+
+    case Args#mrargs.direction of
+        fwd -> ok;
+        rev -> ok;
+        _ -> mrverror(<<"Invalid direction.">>)
+    end,
+
+    case {Args#mrargs.limit >= 0, Args#mrargs.limit == undefined} of
+        {true, _} -> ok;
+        {_, true} -> ok;
+        _ -> mrverror(<<"`limit` must be a positive integer.">>)
+    end,
+
+    case Args#mrargs.skip < 0 of
+        true -> mrverror(<<"`skip` must be >= 0">>);
+        _ -> ok
+    end,
+
+    case {Args#mrargs.view_type, GroupLevel} of
+        {red, exact} -> ok;
+        {_, 0} -> ok;
+        {red, Int} when is_integer(Int), Int >= 0 -> ok;
+        {red, _} -> mrverror(<<"`group_level` must be >= 0">>);
+        {map, _} -> mrverror(<<"Invalid use of grouping on a map view.">>)
+    end,
+
+    case Args#mrargs.stable of
+        true -> ok;
+        false -> ok;
+        _ -> mrverror(<<"Invalid value for `stable`.">>)
+    end,
+
+    case Args#mrargs.update of
+        true -> ok;
+        false -> ok;
+        lazy -> ok;
+        _ -> mrverror(<<"Invalid value for `update`.">>)
+    end,
+
+    case is_boolean(Args#mrargs.inclusive_end) of
+        true -> ok;
+        _ -> mrverror(<<"Invalid value for `inclusive_end`.">>)
+    end,
+
+    case {Args#mrargs.view_type, Args#mrargs.include_docs} of
+        {red, true} -> mrverror(<<"`include_docs` is invalid for reduce">>);
+        {_, ID} when is_boolean(ID) -> ok;
+        _ -> mrverror(<<"Invalid value for `include_docs`">>)
+    end,
+
+    case {Args#mrargs.view_type, Args#mrargs.conflicts} of
+        {_, undefined} -> ok;
+        {map, V} when is_boolean(V) -> ok;
+        {red, undefined} -> ok;
+        {map, _} -> mrverror(<<"Invalid value for `conflicts`.">>);
+        {red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
+    end,
+
+    SKDocId = case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
+        {fwd, undefined} -> <<>>;
+        {rev, undefined} -> <<255>>;
+        {_, SKDocId1} -> SKDocId1
+    end,
+
+    EKDocId = case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
+        {fwd, undefined} -> <<255>>;
+        {rev, undefined} -> <<>>;
+        {_, EKDocId1} -> EKDocId1
+    end,
+
+    case is_boolean(Args#mrargs.sorted) of
+        true -> ok;
+        _ -> mrverror(<<"Invalid value for `sorted`.">>)
+    end,
+
+    Args#mrargs{
+        start_key_docid=SKDocId,
+        end_key_docid=EKDocId,
+        group_level=GroupLevel
+    }.
+
+
+validate_args(Db, DDoc, Args0) ->
+    {ok, State} = couch_views_util:ddoc_to_mrst(fabric2_db:name(Db), DDoc),
+    Args1 = apply_limit(State#mrst.partitioned, Args0),
+    validate_args(State, Args1).
+
+
+validate_ddoc(#{} = Db, DDoc) ->
+    DbName = fabric2_db:name(Db),
+    IsPartitioned = fabric2_db:is_partitioned(Db),
+    validate_ddoc(DbName, IsPartitioned, DDoc).
+
+
+% Private functions
+
+validate_ddoc(DbName, _IsDbPartitioned,  DDoc) ->
+    ok = validate_ddoc_fields(DDoc#doc.body),
+    GetName = fun
+        (#mrview{map_names = [Name | _]}) -> Name;
+        (#mrview{reduce_funs = [{Name, _} | _]}) -> Name;
+        (_) -> null
+    end,
+    ValidateView = fun(Ctx, #mrview{def=MapSrc, reduce_funs=Reds}=View) ->
+        couch_eval:try_compile(Ctx, map, GetName(View), MapSrc),
+        lists:foreach(fun
+            ({_RedName, <<"_sum", _/binary>>}) ->
+                ok;
+            ({_RedName, <<"_count", _/binary>>}) ->
+                ok;
+            ({_RedName, <<"_stats", _/binary>>}) ->
+                ok;
+            ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
+                ok;
+            ({_RedName, <<"_", _/binary>> = Bad}) ->
+                Msg = ["`", Bad, "` is not a supported reduce function."],
+                throw({invalid_design_doc, Msg});
+            ({RedName, RedSrc}) ->
+                couch_eval:try_compile(Ctx, reduce, RedName, RedSrc)
+        end, Reds)
+    end,
+    {ok, #mrst{
+        language = Lang,
+        views = Views
+    }} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+
+    Views =/= [] andalso couch_eval:with_context(#{language => Lang}, fun (Ctx) ->
+        lists:foreach(fun(V) -> ValidateView(Ctx, V) end, Views)
+    end),
+    ok.
+
+
+validate_args(#mrst{} = State, Args0) ->
+    Args = validate_args(Args0),
+
+    ViewPartitioned = State#mrst.partitioned,
+    Partition = get_extra(Args, partition),
+
+    case {ViewPartitioned, Partition} of
+        {true, undefined} ->
+            Msg1 = <<"`partition` parameter is mandatory "
+                    "for queries to this view.">>,
+            mrverror(Msg1);
+        {true, _} ->
+            apply_partition(Args, Partition);
+        {false, undefined} ->
+            Args;
+        {false, Value} when is_binary(Value) ->
+            Msg2 = <<"`partition` parameter is not "
+                    "supported in this design doc">>,
+            mrverror(Msg2)
+    end.
+
+
+validate_ddoc_fields(DDoc) ->
+    MapFuncType = map_function_type(DDoc),
+    lists:foreach(fun(Path) ->
+        validate_ddoc_fields(DDoc, Path)
+    end, [
+        [{<<"filters">>, object}, {any, [object, string]}],
+        [{<<"language">>, string}],
+        [{<<"lists">>, object}, {any, [object, string]}],
+        [{<<"options">>, object}],
+        [{<<"options">>, object}, {<<"include_design">>, boolean}],
+        [{<<"options">>, object}, {<<"local_seq">>, boolean}],
+        [{<<"options">>, object}, {<<"partitioned">>, boolean}],
+        [{<<"rewrites">>, [string, array]}],
+        [{<<"shows">>, object}, {any, [object, string]}],
+        [{<<"updates">>, object}, {any, [object, string]}],
+        [{<<"validate_doc_update">>, string}],
+        [{<<"views">>, object}, {<<"lib">>, object}],
+        [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
+        [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
+    ]),
+    require_map_function_for_views(DDoc),
+    ok.
+
+
+require_map_function_for_views({Props}) ->
+    case couch_util:get_value(<<"views">>, Props) of
+        undefined -> ok;
+        {Views} ->
+            lists:foreach(fun
+                ({<<"lib">>, _}) -> ok;
+                ({Key, {Value}}) ->
+                    case couch_util:get_value(<<"map">>, Value) of
+                        undefined -> throw({invalid_design_doc,
+                            <<"View `", Key/binary, "` must contain map function">>});
+                        _ -> ok
+                    end
+            end, Views),
+            ok
+    end.
+
+
+validate_ddoc_fields(DDoc, Path) ->
+    case validate_ddoc_fields(DDoc, Path, []) of
+        ok -> ok;
+        {error, {FailedPath0, Type0}} ->
+            FailedPath = iolist_to_binary(join(FailedPath0, <<".">>)),
+            Type = format_type(Type0),
+            throw({invalid_design_doc,
+                  <<"`", FailedPath/binary, "` field must have ",
+                     Type/binary, " type">>})
+    end.
+
+validate_ddoc_fields(undefined, _, _) ->
+    ok;
+
+validate_ddoc_fields(_, [], _) ->
+    ok;
+
+validate_ddoc_fields({KVS}=Props, [{any, Type} | Rest], Acc) ->
+    lists:foldl(fun
+        ({Key, _}, ok) ->
+            validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
+        ({_, _}, {error, _}=Error) ->
+            Error
+    end, ok, KVS);
+
+validate_ddoc_fields({KVS}=Props, [{Key, Type} | Rest], Acc) ->
+    case validate_ddoc_field(Props, {Key, Type}) of
+        ok ->
+            validate_ddoc_fields(couch_util:get_value(Key, KVS),
+                                 Rest,
+                                 [Key | Acc]);
+        error ->
+            {error, {[Key | Acc], Type}};
+        {error, Key1} ->
+            {error, {[Key1 | Acc], Type}}
+    end.
+
+
+validate_ddoc_field(undefined, Type) when is_atom(Type) ->
+    ok;
+
+validate_ddoc_field(_, any) ->
+    ok;
+
+validate_ddoc_field(Value, Types) when is_list(Types) ->
+    lists:foldl(fun
+        (_, ok) -> ok;
+        (Type, _) -> validate_ddoc_field(Value, Type)
+    end, error, Types);
+validate_ddoc_field(Value, string) when is_binary(Value) ->
+    ok;
+
+validate_ddoc_field(Value, array) when is_list(Value) ->
+    ok;
+
+validate_ddoc_field({Value}, object) when is_list(Value) ->
+    ok;
+
+validate_ddoc_field(Value, boolean) when is_boolean(Value) ->
+    ok;
+
+validate_ddoc_field({Props}, {any, Type}) ->
+    validate_ddoc_field1(Props, Type);
+
+validate_ddoc_field({Props}, {Key, Type}) ->
+    validate_ddoc_field(couch_util:get_value(Key, Props), Type);
+
+validate_ddoc_field(_, _) ->
+    error.
+
+
+validate_ddoc_field1([], _) ->
+    ok;
+
+validate_ddoc_field1([{Key, Value} | Rest], Type) ->
+    case validate_ddoc_field(Value, Type) of
+        ok ->
+            validate_ddoc_field1(Rest, Type);
+        error ->
+            {error, Key}
+    end.
+
+
+map_function_type({Props}) ->
+    case couch_util:get_value(<<"language">>, Props) of
+        <<"query">> -> object;
+        _ -> string
+    end.
+
+
+format_type(Type) when is_atom(Type) ->
+    ?l2b(atom_to_list(Type));
+
+format_type(Types) when is_list(Types) ->
+    iolist_to_binary(join(lists:map(fun atom_to_list/1, Types), <<" or ">>)).
+
+
+join(L, Sep) ->
+    join(L, Sep, []).
+
+
+join([H|[]], _, Acc) ->
+    [H | Acc];
+
+join([H|T], Sep, Acc) ->
+    join(T, Sep, [Sep, H | Acc]).
+
+
+determine_group_level(#mrargs{group=undefined, group_level=undefined}) ->
+    0;
+
+determine_group_level(#mrargs{group=false, group_level=undefined}) ->
+    0;
+
+determine_group_level(#mrargs{group=false, group_level=Level}) when Level > 0 ->
+    mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
+
+determine_group_level(#mrargs{group=true, group_level=undefined}) ->
+    exact;
+
+determine_group_level(#mrargs{group_level=GroupLevel}) ->
+    GroupLevel.
+
+
+mrverror(Mesg) ->
+    throw({query_parse_error, Mesg}).
+
+
+apply_partition(#mrargs{keys=[{p, _, _} | _]} = Args, _Partition) ->
+    Args; % already applied
+
+apply_partition(#mrargs{keys=Keys} = Args, Partition) when Keys /= undefined ->
+    Args#mrargs{keys=[{p, Partition, K} || K <- Keys]};
+
+apply_partition(#mrargs{start_key={p, _, _}, end_key={p, _, _}} = Args, _Partition) ->
+    Args; % already applied.
+
+apply_partition(Args, Partition) ->
+    #mrargs{
+        direction = Dir,
+        start_key = StartKey,
+        end_key = EndKey
+    } = Args,
+
+    {DefSK, DefEK} = case Dir of
+        fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
+        rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
+    end,
+
+    SK0 = if StartKey /= undefined -> StartKey; true -> DefSK end,
+    EK0 = if EndKey /= undefined -> EndKey; true -> DefEK end,
+
+    Args#mrargs{
+        start_key = {p, Partition, SK0},
+        end_key = {p, Partition, EK0}
+    }.
+
+
+get_extra(#mrargs{} = Args, Key) ->
+    couch_util:get_value(Key, Args#mrargs.extra).
+
+
+apply_limit(ViewPartitioned, Args) ->
+    Options = Args#mrargs.extra,
+    IgnorePQLimit = lists:keyfind(ignore_partition_query_limit, 1, Options),
+    LimitType = case {ViewPartitioned, IgnorePQLimit} of
+        {true, false} -> "partition_query_limit";
+        {true, _} -> "query_limit";
+        {false, _} -> "query_limit"
+    end,
+
+    MaxLimit = config:get_integer("query_server_config",
+        LimitType, ?MAX_VIEW_LIMIT),
+
+    % Set the highest limit possible if a user has not
+    % specified a limit
+    Args1 = case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
+        true -> Args#mrargs{limit = MaxLimit};
+        false -> Args
+    end,
+
+    if Args1#mrargs.limit =< MaxLimit -> Args1; true ->
+        Fmt = "Limit is too large, must not exceed ~p",
+        mrverror(io_lib:format(Fmt, [MaxLimit]))
+    end.

[couchdb] 12/24: Remove rewrite support from couch_js and couch_query_servers

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 93cd8b5ecffadb7109320405ab623e044350bafe
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 01:44:39 2021 -0400

    Remove rewrite support from couch_js and couch_query_servers
---
 src/couch/src/couch_query_servers.erl       | 79 ----------------------------
 src/couch_js/src/couch_js_query_servers.erl | 80 -----------------------------
 2 files changed, 159 deletions(-)

diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index 5cafcb0..d252382 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -18,7 +18,6 @@
 -export([filter_docs/5]).
 -export([filter_view/3]).
 -export([finalize/2]).
--export([rewrite/3]).
 
 -export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
 
@@ -392,84 +391,6 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
     end.
 
 
-rewrite(Req, Db, DDoc) ->
-    Fields = [F || F <- chttpd_external:json_req_obj_fields(),
-              F =/= <<"info">>, F =/= <<"form">>,
-              F =/= <<"uuid">>, F =/= <<"id">>],
-    JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields),
-    case couch_query_servers:ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of
-        {[{<<"forbidden">>, Message}]} ->
-            throw({forbidden, Message});
-        {[{<<"unauthorized">>, Message}]} ->
-            throw({unauthorized, Message});
-        [<<"no_dispatch_rule">>] ->
-            undefined;
-        [<<"ok">>, {V}=Rewrite] when is_list(V) ->
-            ok = validate_rewrite_response(Rewrite),
-            Rewrite;
-        [<<"ok">>, _]  ->
-            throw_rewrite_error(<<"bad rewrite">>);
-        V ->
-            couch_log:error("bad rewrite return ~p", [V]),
-            throw({unknown_error, V})
-    end.
-
-validate_rewrite_response({Fields}) when is_list(Fields) ->
-    validate_rewrite_response_fields(Fields).
-
-validate_rewrite_response_fields([{Key, Value} | Rest]) ->
-    validate_rewrite_response_field(Key, Value),
-    validate_rewrite_response_fields(Rest);
-validate_rewrite_response_fields([]) ->
-    ok.
-
-validate_rewrite_response_field(<<"method">>, Method) when is_binary(Method) ->
-    ok;
-validate_rewrite_response_field(<<"method">>, _) ->
-    throw_rewrite_error(<<"bad method">>);
-validate_rewrite_response_field(<<"path">>, Path) when is_binary(Path) ->
-    ok;
-validate_rewrite_response_field(<<"path">>, _) ->
-    throw_rewrite_error(<<"bad path">>);
-validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) ->
-    ok;
-validate_rewrite_response_field(<<"body">>, _) ->
-    throw_rewrite_error(<<"bad body">>);
-validate_rewrite_response_field(<<"headers">>, {Props}=Headers) when is_list(Props) ->
-    validate_object_fields(Headers);
-validate_rewrite_response_field(<<"headers">>, _) ->
-    throw_rewrite_error(<<"bad headers">>);
-validate_rewrite_response_field(<<"query">>, {Props}=Query) when is_list(Props) ->
-    validate_object_fields(Query);
-validate_rewrite_response_field(<<"query">>, _) ->
-    throw_rewrite_error(<<"bad query">>);
-validate_rewrite_response_field(<<"code">>, Code) when is_integer(Code) andalso Code >= 200 andalso Code < 600 ->
-    ok;
-validate_rewrite_response_field(<<"code">>, _) ->
-    throw_rewrite_error(<<"bad code">>);
-validate_rewrite_response_field(K, V) ->
-    couch_log:debug("unknown rewrite field ~p=~p", [K, V]),
-    ok.
-
-validate_object_fields({Props}) when is_list(Props) ->
-    lists:foreach(fun
-        ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
-            ok;
-        ({Key, Value}) ->
-            Reason = io_lib:format(
-                "object key/value must be strings ~p=~p", [Key, Value]),
-            throw_rewrite_error(Reason);
-        (Value) ->
-            throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
-    end, Props).
-
-
-throw_rewrite_error(Reason) when is_list(Reason)->
-    throw_rewrite_error(iolist_to_binary(Reason));
-throw_rewrite_error(Reason) when is_binary(Reason) ->
-    throw({rewrite_error, Reason}).
-
-
 json_doc_options() ->
     json_doc_options([]).
 
diff --git a/src/couch_js/src/couch_js_query_servers.erl b/src/couch_js/src/couch_js_query_servers.erl
index 12dc864..80ceb3d 100644
--- a/src/couch_js/src/couch_js_query_servers.erl
+++ b/src/couch_js/src/couch_js_query_servers.erl
@@ -18,7 +18,6 @@
 -export([filter_docs/5]).
 -export([filter_view/3]).
 -export([finalize/2]).
--export([rewrite/3]).
 
 -export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
 
@@ -386,85 +385,6 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
             throw({unknown_error, Message})
     end.
 
-
-rewrite(Req, Db, DDoc) ->
-    Fields = [F || F <- chttpd_external:json_req_obj_fields(),
-              F =/= <<"info">>, F =/= <<"form">>,
-              F =/= <<"uuid">>, F =/= <<"id">>],
-    JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields),
-    case ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of
-        {[{<<"forbidden">>, Message}]} ->
-            throw({forbidden, Message});
-        {[{<<"unauthorized">>, Message}]} ->
-            throw({unauthorized, Message});
-        [<<"no_dispatch_rule">>] ->
-            undefined;
-        [<<"ok">>, {V}=Rewrite] when is_list(V) ->
-            ok = validate_rewrite_response(Rewrite),
-            Rewrite;
-        [<<"ok">>, _]  ->
-            throw_rewrite_error(<<"bad rewrite">>);
-        V ->
-            couch_log:error("bad rewrite return ~p", [V]),
-            throw({unknown_error, V})
-    end.
-
-validate_rewrite_response({Fields}) when is_list(Fields) ->
-    validate_rewrite_response_fields(Fields).
-
-validate_rewrite_response_fields([{Key, Value} | Rest]) ->
-    validate_rewrite_response_field(Key, Value),
-    validate_rewrite_response_fields(Rest);
-validate_rewrite_response_fields([]) ->
-    ok.
-
-validate_rewrite_response_field(<<"method">>, Method) when is_binary(Method) ->
-    ok;
-validate_rewrite_response_field(<<"method">>, _) ->
-    throw_rewrite_error(<<"bad method">>);
-validate_rewrite_response_field(<<"path">>, Path) when is_binary(Path) ->
-    ok;
-validate_rewrite_response_field(<<"path">>, _) ->
-    throw_rewrite_error(<<"bad path">>);
-validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) ->
-    ok;
-validate_rewrite_response_field(<<"body">>, _) ->
-    throw_rewrite_error(<<"bad body">>);
-validate_rewrite_response_field(<<"headers">>, {Props}=Headers) when is_list(Props) ->
-    validate_object_fields(Headers);
-validate_rewrite_response_field(<<"headers">>, _) ->
-    throw_rewrite_error(<<"bad headers">>);
-validate_rewrite_response_field(<<"query">>, {Props}=Query) when is_list(Props) ->
-    validate_object_fields(Query);
-validate_rewrite_response_field(<<"query">>, _) ->
-    throw_rewrite_error(<<"bad query">>);
-validate_rewrite_response_field(<<"code">>, Code) when is_integer(Code) andalso Code >= 200 andalso Code < 600 ->
-    ok;
-validate_rewrite_response_field(<<"code">>, _) ->
-    throw_rewrite_error(<<"bad code">>);
-validate_rewrite_response_field(K, V) ->
-    couch_log:debug("unknown rewrite field ~p=~p", [K, V]),
-    ok.
-
-validate_object_fields({Props}) when is_list(Props) ->
-    lists:foreach(fun
-        ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
-            ok;
-        ({Key, Value}) ->
-            Reason = io_lib:format(
-                "object key/value must be strings ~p=~p", [Key, Value]),
-            throw_rewrite_error(Reason);
-        (Value) ->
-            throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
-    end, Props).
-
-
-throw_rewrite_error(Reason) when is_list(Reason)->
-    throw_rewrite_error(iolist_to_binary(Reason));
-throw_rewrite_error(Reason) when is_binary(Reason) ->
-    throw({rewrite_error, Reason}).
-
-
 json_doc_options() ->
     json_doc_options([]).
 

[couchdb] 08/24: Update couch_flags to remove knowledge about shards

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit d3176243a2adfef16b450d59ff99e5cd5a3d141d
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 00:24:56 2021 -0400

    Update couch_flags to remove knowledge about shards
    
    Remove shard handling from `couch_flags`.
    
    The `couch_db:normalize_dbname/1` call is no longer necessary, as db names
    are not shard names and no longer carry the `.couch` extension.
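
    For context, a rough sketch (assumed, not part of this commit) of the
    normalization that is no longer needed, for 3.x shard paths such as
    <<"shards/00000000-7fffffff/mydb.1617382911.couch">>:

        %% Illustrative only: roughly what couch_db:normalize_dbname/1 had
        %% to undo; the exact 3.x shard name layout is an assumption here.
        normalize_dbname(<<"shards/", _Range:17/binary, "/", Rest/binary>>) ->
            NoExt = filename:rootname(Rest, <<".couch">>), % drop on-disk extension
            filename:rootname(NoExt);                      % drop creation timestamp
        normalize_dbname(DbName) when is_binary(DbName) ->
            DbName.

    With fabric2, subject keys are already plain db names, so the flag
    lookup can use the key directly.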
---
 src/couch/src/couch_flags.erl | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/src/couch/src/couch_flags.erl b/src/couch/src/couch_flags.erl
index 5cfe7f6..5bd133e 100644
--- a/src/couch/src/couch_flags.erl
+++ b/src/couch/src/couch_flags.erl
@@ -61,14 +61,10 @@
 ]).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include("couch_db_int.hrl").
 
 -type subject()
-    :: #db{}
+    :: map()
         | #httpd{}
-        | #shard{}
-        | #ordered_shard{}
         | string()
         | binary().
 
@@ -80,7 +76,7 @@ enabled(Subject) ->
     Key = maybe_handle(subject_key, [Subject], fun subject_key/1),
     Handle = couch_epi:get_handle({flags, config}),
     lists:usort(enabled(Handle, {<<"/", Key/binary>>})
-        ++ enabled(Handle, {couch_db:normalize_dbname(Key)})).
+        ++ enabled(Handle, {Key})).
 
 -spec is_enabled(FlagId :: atom(), subject()) -> boolean().
 
@@ -104,16 +100,12 @@ enabled(Handle, Key) ->
 
 -spec subject_key(subject()) -> binary().
 
-subject_key(#db{name = Name}) ->
-    subject_key(Name);
+subject_key(#{} = Db) ->
+    subject_key(fabric2_db:name(Db));
 subject_key(#httpd{path_parts=[Name | _Rest]}) ->
     subject_key(Name);
 subject_key(#httpd{path_parts=[]}) ->
     <<>>;
-subject_key(#shard{name = Name}) ->
-    subject_key(Name);
-subject_key(#ordered_shard{name = Name}) ->
-    subject_key(Name);
 subject_key(Name) when is_list(Name) ->
     subject_key(list_to_binary(Name));
 subject_key(Name) when is_binary(Name) ->

[couchdb] 05/24: Remove commented out tests from couch_att

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 41fa9a7ad84e9e6f92e873f7252e95c387416eca
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 00:10:08 2021 -0400

    Remove commented out tests from couch_att
    
    Many of those tests check attachment upgrades from the older 1.x-3.x
    formats, check the `follows` functionality, which is not supported in FDB
    (we don't stream attachments but buffer them outside transactions), or
    import couch_bt_engine.
---
 src/couch/src/couch_att.erl | 189 --------------------------------------------
 1 file changed, 189 deletions(-)

diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index b4c95e9..9009b52 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -690,192 +690,3 @@ validate_attachment_size(AttName, AttSize, MaxAttSize)
     throw({request_entity_too_large, {attachment, AttName}});
 validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
     ok.
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% % Eww...
-%% -include("couch_bt_engine.hrl").
-%%
-%% %% Test utilities
-%%
-%%
-%% empty_att() -> new().
-%%
-%%
-%% upgraded_empty_att() ->
-%%     new([{headers, undefined}]).
-%%
-%%
-%% %% Test groups
-%%
-%%
-%% attachment_upgrade_test_() ->
-%%     {"Lazy record upgrade tests", [
-%%         {"Existing record fields don't upgrade",
-%%             {with, empty_att(), [fun test_non_upgrading_fields/1]}
-%%         },
-%%         {"New fields upgrade",
-%%             {with, empty_att(), [fun test_upgrading_fields/1]}
-%%         }
-%%     ]}.
-%%
-%%
-%% attachment_defaults_test_() ->
-%%     {"Attachment defaults tests", [
-%%         {"Records retain old default values", [
-%%             {with, empty_att(), [fun test_legacy_defaults/1]}
-%%         ]},
-%%         {"Upgraded records inherit defaults", [
-%%             {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
-%%         ]},
-%%         {"Undefined entries are elided on upgrade", [
-%%             {with, upgraded_empty_att(), [fun test_elided_entries/1]}
-%%         ]}
-%%     ]}.
-%%
-%% attachment_field_api_test_() ->
-%%     {"Basic attachment field api", [
-%%         fun test_construction/0,
-%%         fun test_store_and_fetch/0,
-%%         fun test_transform/0
-%%     ]}.
-%%
-%%
-%% attachment_disk_term_test_() ->
-%%     BaseAttachment = new([
-%%         {name, <<"empty">>},
-%%         {type, <<"application/octet-stream">>},
-%%         {att_len, 0},
-%%         {disk_len, 0},
-%%         {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
-%%         {revpos, 4},
-%%         {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
-%%         {encoding, identity}
-%%     ]),
-%%     BaseDiskTerm = {
-%%         <<"empty">>,
-%%         <<"application/octet-stream">>,
-%%         fake_sp,
-%%         0, 0, 4,
-%%         <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
-%%         identity
-%%     },
-%%     Headers = [{<<"X-Foo">>, <<"bar">>}],
-%%     ExtendedAttachment = store(headers, Headers, BaseAttachment),
-%%     ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
-%%     FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
-%%     {"Disk term tests", [
-%%         ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
-%%         ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
-%%         ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
-%%         ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
-%%     ]}.
-%%
-%%
-%% attachment_json_term_test_() ->
-%%     Props = [
-%%         {<<"content_type">>, <<"application/json">>},
-%%         {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
-%%         {<<"length">>, 14},
-%%         {<<"revpos">>, 1}
-%%     ],
-%%     PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
-%%     InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
-%%     Att = couch_att:new([
-%%         {name, <<"attachment.json">>},
-%%         {type, <<"application/json">>}
-%%     ]),
-%%     ResultStub = couch_att:new([
-%%         {name, <<"attachment.json">>},
-%%         {type, <<"application/json">>},
-%%         {att_len, 14},
-%%         {disk_len, 14},
-%%         {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
-%%         {revpos, 1},
-%%         {data, stub},
-%%         {encoding, identity}
-%%     ]),
-%%     ResultFollows = ResultStub#att{data = follows},
-%%     ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
-%%     {"JSON term tests", [
-%%         ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
-%%         ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
-%%         ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
-%%         ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
-%%         ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
-%%     ]}.
-%%
-%%
-%% attachment_stub_merge_test_() ->
-%%     %% Stub merging needs to demonstrate revpos matching, skipping, and missing
-%%     %% attachment errors.
-%%     {"Attachment stub merging tests", []}.
-%%
-%%
-%% %% Test generators
-%%
-%%
-%% test_non_upgrading_fields(Attachment) ->
-%%     Pairs = [
-%%         {name, "cat.gif"},
-%%         {type, "text/very-very-plain"},
-%%         {att_len, 1024},
-%%         {disk_len, 42},
-%%         {md5, <<"md5-hashhashhash">>},
-%%         {revpos, 4},
-%%         {data, stub},
-%%         {encoding, gzip}
-%%     ],
-%%     lists:foreach(
-%%         fun({Field, Value}) ->
-%%             ?assertMatch(#att{}, Attachment),
-%%             Updated = store(Field, Value, Attachment),
-%%             ?assertMatch(#att{}, Updated)
-%%         end,
-%%     Pairs).
-%%
-%%
-%% test_upgrading_fields(Attachment) ->
-%%     ?assertMatch(#att{}, Attachment),
-%%     UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
-%%     ?assertMatch(X when is_list(X), UpdatedHeaders),
-%%     UpdatedHeadersUndefined = store(headers, undefined, Attachment),
-%%     ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
-%%
-%%
-%% test_legacy_defaults(Attachment) ->
-%%     ?assertEqual(<<>>, fetch(md5, Attachment)),
-%%     ?assertEqual(0, fetch(revpos, Attachment)),
-%%     ?assertEqual(identity, fetch(encoding, Attachment)).
-%%
-%%
-%% test_elided_entries(Attachment) ->
-%%     ?assertNot(lists:keymember(name, 1, Attachment)),
-%%     ?assertNot(lists:keymember(type, 1, Attachment)),
-%%     ?assertNot(lists:keymember(att_len, 1, Attachment)),
-%%     ?assertNot(lists:keymember(disk_len, 1, Attachment)),
-%%     ?assertNot(lists:keymember(data, 1, Attachment)).
-%%
-%%
-%% test_construction() ->
-%%     ?assert(new() == new()),
-%%     Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
-%%     ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
-%%     ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
-%%
-%%
-%% test_store_and_fetch() ->
-%%     Attachment = empty_att(),
-%%     ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
-%%     ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
-%%
-%%
-%% test_transform() ->
-%%     Attachment = new([{counter, 0}]),
-%%     Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
-%%     ?assertEqual(1, fetch(counter, Transformed)).
-%%
-%%
-%% -endif.

[couchdb] 13/24: Update couch_util to remove couch_db and mem3 calls

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9ac2ae5bd15954ee389d9bd556cde1be9e0b6694
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 01:45:51 2021 -0400

    Update couch_util to remove couch_db and mem3 calls
    
    Also remove the with_db/2 function, as it is no longer used.
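
    As a usage sketch (assumed, not part of this commit): with fabric2 a Db
    handle is a map carrying at least its name and user ctx, so
    json_user_ctx/1 can read the name directly instead of denormalizing a
    shard path:

        %% Illustrative handle; real fabric2 db maps carry more keys.
        %% #user_ctx{} is the record from couch/include/couch_db.hrl.
        Db = #{name => <<"mydb">>,
               user_ctx => #user_ctx{name = <<"bob">>, roles = []}},
        {[{<<"db">>, <<"mydb">>}, {<<"name">>, <<"bob">>}, {<<"roles">>, []}]} =
            couch_util:json_user_ctx(Db).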
---
 src/couch/src/couch_util.erl | 26 +++-----------------------
 1 file changed, 3 insertions(+), 23 deletions(-)

diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 8d64339..af7b7ff 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -28,7 +28,6 @@
 -export([url_strip_password/1]).
 -export([encode_doc_id/1]).
 -export([normalize_ddoc_id/1]).
--export([with_db/2]).
 -export([rfc1123_date/0, rfc1123_date/1]).
 -export([integer_to_boolean/1, boolean_to_integer/1]).
 -export([validate_positive_int/1]).
@@ -260,9 +259,9 @@ json_apply_field({Key, NewValue}, [], Acc) ->
     {[{Key, NewValue}|Acc]}.
 
 json_user_ctx(Db) ->
-    ShardName = couch_db:name(Db),
-    Ctx = couch_db:get_user_ctx(Db),
-    {[{<<"db">>, mem3:dbname(ShardName)},
+    #{name := DbName} = Db,
+    Ctx = fabric2_db:get_user_ctx(Db),
+    {[{<<"db">>, DbName},
             {<<"name">>,Ctx#user_ctx.name},
             {<<"roles">>,Ctx#user_ctx.roles}]}.
 
@@ -565,25 +564,6 @@ normalize_ddoc_id(<<"_design/", _/binary>> = DDocId) ->
 normalize_ddoc_id(DDocId) when is_binary(DDocId) ->
     <<"_design/", DDocId/binary>>.
 
-with_db(DbName, Fun)  when is_binary(DbName) ->
-    case couch_db:open_int(DbName, [?ADMIN_CTX]) of
-        {ok, Db} ->
-            try
-                Fun(Db)
-            after
-                catch couch_db:close(Db)
-            end;
-        Else ->
-            throw(Else)
-    end;
-with_db(Db, Fun) ->
-    case couch_db:is_db(Db) of
-        true ->
-            Fun(Db);
-        false ->
-            erlang:error({invalid_db, Db})
-    end.
-
 rfc1123_date() ->
     {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
     DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),

[couchdb] 09/24: Clean up couch_doc

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e31ae8d99173bc95801a06903764980b8c6ae7a0
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 00:27:07 2021 -0400

    Clean up couch_doc
    
    The main change is to remove `validate_docid/1,2` and use
    `fabric2_db:validate_docid/1` instead.
    
    `with_ejson_body` is also no longer needed, as request bodies are parsed
    into ejson and fabric2_fdb also deserializes to ejson.
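
    A quick sketch of the intended behavior (the error tuple shown is an
    assumption; fabric2_db keeps the same `illegal_docid` convention):

        ok = fabric2_db:validate_docid(<<"_design/myddoc">>),
        ok = fabric2_db:validate_docid(<<"plain-doc-id">>),
        %% invalid ids still throw, e.g.
        %% fabric2_db:validate_docid(<<>>) =>
        %%   throw({illegal_docid, <<"Document id must not be empty">>})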
---
 src/chttpd/src/chttpd_show.erl    |  2 +-
 src/couch/src/couch_doc.erl       | 59 ++-------------------------------------
 src/couch/src/couch_partition.erl |  2 +-
 3 files changed, 5 insertions(+), 58 deletions(-)

diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index 295d753..b17309a 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -65,7 +65,7 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
                 Options = [{user_ctx, Req#httpd.user_ctx}]
             end,
             NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
-            couch_doc:validate_docid(NewDoc#doc.id),
+            fabric2_db:validate_docid(NewDoc#doc.id),
             {UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
             chttpd_stats:incr_writes(),
             NewRevStr = couch_doc:rev_to_str(NewRev),
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index 7224921..4d0a13d 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -16,14 +16,13 @@
 -export([from_json_obj/1, from_json_obj_validate/1]).
 -export([from_json_obj/2, from_json_obj_validate/2]).
 -export([to_json_obj/2, has_stubs/1, merge_stubs/2]).
--export([validate_docid/1, validate_docid/2, get_validate_doc_fun/1]).
+-export([get_validate_doc_fun/1]).
 -export([doc_from_multi_part_stream/2, doc_from_multi_part_stream/3]).
 -export([doc_from_multi_part_stream/4]).
 -export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
 -export([restart_open_doc_revs/3]).
 -export([to_path/1]).
 
--export([with_ejson_body/1]).
 -export([is_deleted/1]).
 
 
@@ -115,7 +114,7 @@ to_json_attachments(Atts, OutputData, Follows, ShowEnc) ->
     [{<<"_attachments">>, {Props}}].
 
 to_json_obj(Doc, Options) ->
-    doc_to_json_obj(with_ejson_body(Doc), Options).
+    doc_to_json_obj(Doc, Options).
 
 doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
             meta=Meta}=Doc,Options)->
@@ -198,58 +197,12 @@ parse_revs(_) ->
     throw({bad_request, "Invalid list of revisions"}).
 
 
-validate_docid(DocId, DbName) ->
-    case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
-        couch_db:is_system_db_name(DocId) of
-        true ->
-            ok;
-        false ->
-            validate_docid(DocId)
-    end.
-
-validate_docid(<<"">>) ->
-    throw({illegal_docid, <<"Document id must not be empty">>});
-validate_docid(<<"_design/">>) ->
-    throw({illegal_docid, <<"Illegal document id `_design/`">>});
-validate_docid(<<"_local/">>) ->
-    throw({illegal_docid, <<"Illegal document id `_local/`">>});
-validate_docid(Id) when is_binary(Id) ->
-    MaxLen = case config:get("couchdb", "max_document_id_length", "infinity") of
-        "infinity" -> infinity;
-        IntegerVal -> list_to_integer(IntegerVal)
-    end,
-    case MaxLen > 0 andalso byte_size(Id) > MaxLen of
-        true -> throw({illegal_docid, <<"Document id is too long">>});
-        false -> ok
-    end,
-    case couch_util:validate_utf8(Id) of
-        false -> throw({illegal_docid, <<"Document id must be valid UTF-8">>});
-        true -> ok
-    end,
-    case Id of
-    <<"_design/", _/binary>> -> ok;
-    <<"_local/", _/binary>> -> ok;
-    <<"_", _/binary>> ->
-        case couch_db_plugin:validate_docid(Id) of
-            true ->
-                ok;
-            false ->
-                throw(
-                  {illegal_docid,
-                   <<"Only reserved document ids may start with underscore.">>})
-        end;
-    _Else -> ok
-    end;
-validate_docid(Id) ->
-    couch_log:debug("Document id is not a string: ~p", [Id]),
-    throw({illegal_docid, <<"Document id must be a string">>}).
-
 transfer_fields([], #doc{body=Fields}=Doc, _) ->
     % convert fields back to json object
     Doc#doc{body={lists:reverse(Fields)}};
 
 transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) ->
-    validate_docid(Id, DbName),
+    fabric2_db:validate_docid(Id),
     transfer_fields(Rest, Doc#doc{id=Id}, DbName);
 
 transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc, DbName) ->
@@ -518,9 +471,3 @@ flush_parser_messages(Ref) ->
     after 0 ->
         ok
     end.
-
-
-with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) ->
-    Doc#doc{body = couch_compress:decompress(Body)};
-with_ejson_body(#doc{body = {_}} = Doc) ->
-    Doc.
diff --git a/src/couch/src/couch_partition.erl b/src/couch/src/couch_partition.erl
index f2efcaa..cb78323 100644
--- a/src/couch/src/couch_partition.erl
+++ b/src/couch/src/couch_partition.erl
@@ -122,7 +122,7 @@ validate_docid(DocId) when is_binary(DocId) ->
             throw({illegal_docid, <<"Doc id must be of form partition:id">>});
         {Partition, PartitionedDocId} ->
             validate_partition(Partition),
-            couch_doc:validate_docid(PartitionedDocId)
+            fabric2_db:validate_docid(PartitionedDocId)
     end.
 
 

[couchdb] 20/24: Clean up couch_auth_cache

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c0dba424887633425a1c0563979ccacb93231794
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Thu Apr 15 01:07:55 2021 -0400

    Clean up couch_auth_cache
    
    couch_auth_cache now only handles reading server admin credentials from
    the config files and returning the auth design doc (used in
    chttpd_auth_cache).
    
    The node-local `_users` docs logic has been removed. Validation to check
    for `_conflicts` is also not needed, as the "docs" proplists created from
    the config server's admin section cannot have conflicts.
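
    A sketch of the slimmed-down flow (illustrative; the exact shape of the
    props list is an assumption based on make_admin_doc/3 below): an entry
    in the [admins] config section such as

        [admins]
        root = -pbkdf2-<derived_key>,<salt>,10

    is parsed by get_admin/1 into a props list, and get_user_creds/2 now
    returns it directly, with no _users db lookup:

        Props = couch_auth_cache:get_user_creds(Req, <<"root">>),
        <<"pbkdf2">> = couch_util:get_value(<<"password_scheme">>, Props).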
---
 src/couch/src/couch_auth_cache.erl | 93 +-------------------------------------
 1 file changed, 2 insertions(+), 91 deletions(-)

diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
index c564cee..919d561 100644
--- a/src/couch/src/couch_auth_cache.erl
+++ b/src/couch/src/couch_auth_cache.erl
@@ -16,11 +16,9 @@
 -export([
     get_user_creds/1,
     get_user_creds/2,
-    update_user_creds/3,
     get_admin/1,
     add_roles/2,
-    auth_design_doc/1,
-    ensure_users_db_exists/0
+    auth_design_doc/1
 ]).
 
 
@@ -41,25 +39,7 @@ get_user_creds(Req, UserName) when is_list(UserName) ->
     get_user_creds(Req, ?l2b(UserName));
 
 get_user_creds(_Req, UserName) ->
-    UserCreds = case get_admin(UserName) of
-    nil ->
-        get_from_db(UserName);
-    Props ->
-        case get_from_db(UserName) of
-        nil ->
-            Props;
-        UserProps when is_list(UserProps) ->
-            add_roles(Props, couch_util:get_value(<<"roles">>, UserProps))
-        end
-    end,
-    validate_user_creds(UserCreds).
-
-update_user_creds(_Req, UserDoc, _AuthCtx) ->
-    ok = ensure_users_db_exists(),
-    couch_util:with_db(users_db(), fun(UserDb) ->
-        {ok, _NewRev} = couch_db:update_doc(UserDb, UserDoc, []),
-        ok
-    end).
+    get_admin(UserName).
 
 add_roles(Props, ExtraRoles) ->
     CurrentRoles = couch_util:get_value(<<"roles">>, Props),
@@ -94,75 +74,6 @@ make_admin_doc(DerivedKey, Salt, Iterations) ->
      {<<"password_scheme">>, <<"pbkdf2">>},
      {<<"derived_key">>, ?l2b(DerivedKey)}].
 
-
-get_from_db(UserName) ->
-    ok = ensure_users_db_exists(),
-    couch_util:with_db(users_db(), fun(Db) ->
-        DocId = <<"org.couchdb.user:", UserName/binary>>,
-        try
-            {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]),
-            {DocProps} = couch_doc:to_json_obj(Doc, []),
-            DocProps
-        catch
-        _:_Error ->
-            nil
-        end
-    end).
-
-
-validate_user_creds(nil) ->
-    nil;
-validate_user_creds(UserCreds) ->
-    case couch_util:get_value(<<"_conflicts">>, UserCreds) of
-    undefined ->
-        ok;
-    _ConflictList ->
-        throw({unauthorized,
-            <<"User document conflicts must be resolved before the document",
-              " is used for authentication purposes.">>
-        })
-    end,
-    {ok, UserCreds, nil}.
-
-
-users_db() ->
-    DbNameList = config:get("couch_httpd_auth", "authentication_db", "_users"),
-    ?l2b(DbNameList).
-
-
-ensure_users_db_exists() ->
-    Options = [?ADMIN_CTX, nologifmissing],
-    case couch_db:open(users_db(), Options) of
-    {ok, Db} ->
-        ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
-        couch_db:close(Db);
-    _Error ->
-        {ok, Db} = couch_db:create(users_db(), Options),
-        ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
-        couch_db:close(Db)
-    end,
-    ok.
-
-
-ensure_auth_ddoc_exists(Db, DDocId) ->
-    case couch_db:open_doc(Db, DDocId) of
-    {not_found, _Reason} ->
-        {ok, AuthDesign} = auth_design_doc(DDocId),
-        {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
-    {ok, Doc} ->
-        {Props} = couch_doc:to_json_obj(Doc, []),
-        case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
-            ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
-                ok;
-            _ ->
-                Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
-                    {<<"validate_doc_update">>,
-                    ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
-                couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
-        end
-    end,
-    ok.
-
 auth_design_doc(DocId) ->
     DocProps = [
         {<<"_id">>, DocId},

[couchdb] 01/24: Delete non-functional 3.x applications and modules from main

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f9f7f216f38569320558e8c2268d066c772c1e3f
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Tue Apr 13 19:12:12 2021 -0400

    Delete non-functional 3.x applications and modules from main
    
    Affected applications fall in a few categories:
    
    1. Applications which are not used any more:
    
      * rexi
      * mem3
      * ddoc_cache
      * smoosh
      * ken
      * ioq
      * dreyfus
      * couch_event
      * snappy
      * kash
      * couch_plugins
    
    2. Applications we intend to keep for 4.x, but which in their current form
    rely on the 3.x architecture (mem3, couch_file, clustering). When these
    applications are ready to be re-implemented, they should be copied from
    3.x, as the versions there will be more recent:
    
      * couch_peruser
      * global_changes
      * setup
    
    3. Applications used only for utility functions; those functions will be
    moved to other applications:
    
      * couch_mrview
      * couch_index
    
    4. Applications where some modules have been removed and some stayed:
    
      * couch:
        - removed: couch_db_*, couch_btree, couch_file, couch_lru, etc.
    
        - kept: couch_doc, the key tree module and some others
    
        - couch_server: stripped down to only the functions which are still
          used: serving uuids, returning the couch version and hashing
          passwords. It's a candidate for a separate PR that splits this
          functionality out into other modules (couch_passwords,
          fabric2_server); see the short sketch after this list.
    
      * fabric:
        - removed: all fabric_rpc modules
    
        - kept: fabric2_* modules
    
      * mango:
        - removed clouseau pings and the check for the "text" service
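
    A short sketch of the couch_server surface that remains (the function
    names here are assumptions based on the description above, not taken
    from this commit):

        Uuid = couch_server:get_uuid(),        %% serve uuids
        Version = couch_server:get_version(),  %% return couch version
        Hash = couch_passwords:hash_admin_password(<<"s3cr3t">>).  %% hash passwords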
---
 emilio.config                                      |    4 +-
 mix.exs                                            |    5 +-
 rebar.config.script                                |   17 -
 rel/apps/couch_epi.config                          |    7 +-
 rel/reltool.config                                 |   32 -
 src/chttpd/src/chttpd_rewrite.erl                  |  487 -----
 src/couch/src/couch.app.src                        |    7 +-
 src/couch/src/couch_bt_engine.erl                  | 1246 ------------
 src/couch/src/couch_bt_engine.hrl                  |   27 -
 src/couch/src/couch_bt_engine_compactor.erl        |  590 ------
 src/couch/src/couch_bt_engine_header.erl           |  451 -----
 src/couch/src/couch_bt_engine_stream.erl           |   70 -
 src/couch/src/couch_btree.erl                      |  855 --------
 src/couch/src/couch_changes.erl                    |  724 -------
 src/couch/src/couch_compress.erl                   |   99 -
 src/couch/src/couch_db.erl                         | 2086 --------------------
 src/couch/src/couch_db_engine.erl                  | 1105 -----------
 src/couch/src/couch_db_header.erl                  |  405 ----
 src/couch/src/couch_db_int.hrl                     |   76 -
 src/couch/src/couch_db_plugin.erl                  |   96 -
 src/couch/src/couch_db_split.erl                   |  503 -----
 src/couch/src/couch_db_updater.erl                 |  955 ---------
 src/couch/src/couch_emsort.erl                     |  318 ---
 src/couch/src/couch_event_sup.erl                  |   74 -
 src/couch/src/couch_file.erl                       |  804 --------
 src/couch/src/couch_httpd_db.erl                   | 1263 ------------
 src/couch/src/couch_httpd_misc_handlers.erl        |  269 ---
 src/couch/src/couch_httpd_rewrite.erl              |  484 -----
 src/couch/src/couch_lru.erl                        |   67 -
 src/couch/src/couch_multidb_changes.erl            |  903 ---------
 src/couch/src/couch_server_int.hrl                 |   23 -
 src/couch/src/couch_stream.erl                     |  322 ---
 src/couch/src/couch_task_status.erl                |  171 --
 src/couch/src/couch_users_db.erl                   |  137 --
 src/couch/test/eunit/couch_auth_cache_tests.erl    |  349 ----
 .../test/eunit/couch_bt_engine_compactor_tests.erl |  129 --
 src/couch/test/eunit/couch_bt_engine_tests.erl     |   20 -
 .../test/eunit/couch_bt_engine_upgrade_tests.erl   |  244 ---
 src/couch/test/eunit/couch_btree_tests.erl         |  572 ------
 src/couch/test/eunit/couch_changes_tests.erl       |  962 ---------
 src/couch/test/eunit/couch_db_doc_tests.erl        |  121 --
 src/couch/test/eunit/couch_db_plugin_tests.erl     |  205 --
 .../test/eunit/couch_db_props_upgrade_tests.erl    |   83 -
 src/couch/test/eunit/couch_db_split_tests.erl      |  331 ----
 src/couch/test/eunit/couch_db_tests.erl            |  198 --
 src/couch/test/eunit/couch_file_tests.erl          |  551 ------
 src/couch/test/eunit/couch_index_tests.erl         |  232 ---
 src/couch/test/eunit/couch_server_tests.erl        |  294 ---
 src/couch/test/eunit/couch_stream_tests.erl        |  124 --
 src/couch/test/eunit/couch_task_status_tests.erl   |  233 ---
 src/couch/test/eunit/couchdb_attachments_tests.erl |  765 -------
 src/couch/test/eunit/couchdb_db_tests.erl          |   91 -
 src/couch/test/eunit/couchdb_design_doc_tests.erl  |   87 -
 .../test/eunit/couchdb_file_compression_tests.erl  |  250 ---
 .../test/eunit/couchdb_location_header_tests.erl   |   78 -
 src/couch/test/eunit/couchdb_mrview_tests.erl      |  261 ---
 .../test/eunit/couchdb_update_conflicts_tests.erl  |  280 ---
 src/couch/test/eunit/couchdb_vhosts_tests.erl      |  271 ---
 src/couch/test/eunit/couchdb_views_tests.erl       |  668 -------
 .../test/eunit/fixtures/os_daemon_configer.escript |    3 +-
 src/couch/test/eunit/global_changes_tests.erl      |  159 --
 src/couch/test/exunit/couch_compress_tests.exs     |  113 --
 src/couch/test/exunit/fabric_test.exs              |  101 -
 src/couch_event/.gitignore                         |    2 -
 src/couch_event/LICENSE                            |  202 --
 src/couch_event/README.md                          |    3 -
 src/couch_event/rebar.config                       |    1 -
 src/couch_event/src/couch_event.app.src            |   22 -
 src/couch_event/src/couch_event.erl                |   65 -
 src/couch_event/src/couch_event_app.erl            |   27 -
 src/couch_event/src/couch_event_int.hrl            |   19 -
 src/couch_event/src/couch_event_listener.erl       |  238 ---
 src/couch_event/src/couch_event_listener_mfa.erl   |  107 -
 src/couch_event/src/couch_event_os_listener.erl    |   76 -
 src/couch_event/src/couch_event_server.erl         |  156 --
 src/couch_event/src/couch_event_sup2.erl           |   44 -
 src/couch_index/.gitignore                         |    3 -
 src/couch_index/LICENSE                            |  202 --
 src/couch_index/rebar.config                       |    2 -
 src/couch_index/src/couch_index.app.src            |   19 -
 src/couch_index/src/couch_index.erl                |  639 ------
 src/couch_index/src/couch_index_app.erl            |   21 -
 src/couch_index/src/couch_index_compactor.erl      |  135 --
 src/couch_index/src/couch_index_epi.erl            |   50 -
 src/couch_index/src/couch_index_plugin.erl         |   51 -
 .../src/couch_index_plugin_couch_db.erl            |   26 -
 src/couch_index/src/couch_index_server.erl         |  303 ---
 src/couch_index/src/couch_index_sup.erl            |   24 -
 src/couch_index/src/couch_index_updater.erl        |  239 ---
 src/couch_index/src/couch_index_util.erl           |   78 -
 .../test/eunit/couch_index_compaction_tests.erl    |  117 --
 .../test/eunit/couch_index_ddoc_updated_tests.erl  |  145 --
 src/couch_js/src/couch_js.app.src                  |    3 +-
 src/couch_mrview/LICENSE                           |  202 --
 src/couch_mrview/include/couch_mrview.hrl          |  114 --
 src/couch_mrview/priv/stats_descriptions.cfg       |   24 -
 src/couch_mrview/rebar.config                      |    2 -
 src/couch_mrview/src/couch_mrview.app.src          |   18 -
 src/couch_mrview/src/couch_mrview.erl              |  692 -------
 src/couch_mrview/src/couch_mrview_cleanup.erl      |   59 -
 src/couch_mrview/src/couch_mrview_compactor.erl    |  294 ---
 src/couch_mrview/src/couch_mrview_http.erl         |  650 ------
 src/couch_mrview/src/couch_mrview_index.erl        |  329 ---
 src/couch_mrview/src/couch_mrview_show.erl         |  468 -----
 src/couch_mrview/src/couch_mrview_test_util.erl    |  123 --
 .../src/couch_mrview_update_notifier.erl           |   49 -
 src/couch_mrview/src/couch_mrview_updater.erl      |  373 ----
 src/couch_mrview/src/couch_mrview_util.erl         | 1180 -----------
 .../test/eunit/couch_mrview_all_docs_tests.erl     |  140 --
 .../test/eunit/couch_mrview_collation_tests.erl    |  207 --
 .../test/eunit/couch_mrview_compact_tests.erl      |  115 --
 .../test/eunit/couch_mrview_ddoc_updated_tests.erl |  145 --
 .../eunit/couch_mrview_ddoc_validation_tests.erl   |  422 ----
 .../test/eunit/couch_mrview_design_docs_tests.erl  |  136 --
 .../test/eunit/couch_mrview_http_tests.erl         |   28 -
 .../test/eunit/couch_mrview_index_info_tests.erl   |  111 --
 .../test/eunit/couch_mrview_local_docs_tests.erl   |  148 --
 .../test/eunit/couch_mrview_map_views_tests.erl    |  144 --
 .../eunit/couch_mrview_purge_docs_fabric_tests.erl |  286 ---
 .../test/eunit/couch_mrview_purge_docs_tests.erl   |  575 ------
 .../test/eunit/couch_mrview_red_views_tests.erl    |   95 -
 .../test/eunit/couch_mrview_util_tests.erl         |   39 -
 src/couch_peruser/.gitignore                       |    9 -
 src/couch_peruser/LICENSE                          |  202 --
 src/couch_peruser/README.md                        |   34 -
 src/couch_peruser/src/couch_peruser.app.src        |   20 -
 src/couch_peruser/src/couch_peruser.erl            |  423 ----
 src/couch_peruser/src/couch_peruser_app.erl        |   26 -
 src/couch_peruser/src/couch_peruser_sup.erl        |   29 -
 .../test/eunit/couch_peruser_test.erl              |  538 -----
 src/couch_plugins/LICENSE                          |  202 --
 src/couch_plugins/Makefile.am                      |   40 -
 src/couch_plugins/README.md                        |  159 --
 src/couch_plugins/src/couch_plugins.app.src        |   22 -
 src/couch_plugins/src/couch_plugins.erl            |  304 ---
 src/couch_plugins/src/couch_plugins_httpd.erl      |   65 -
 src/couch_pse_tests/src/couch_pse_tests.app.src    |   20 -
 src/couch_pse_tests/src/cpse_gather.erl            |   95 -
 src/couch_pse_tests/src/cpse_test_attachments.erl  |   99 -
 src/couch_pse_tests/src/cpse_test_compaction.erl   |  318 ---
 .../src/cpse_test_copy_purge_infos.erl             |   82 -
 src/couch_pse_tests/src/cpse_test_fold_changes.erl |  185 --
 src/couch_pse_tests/src/cpse_test_fold_docs.erl    |  400 ----
 .../src/cpse_test_fold_purge_infos.erl             |  167 --
 .../src/cpse_test_get_set_props.erl                |   95 -
 .../src/cpse_test_open_close_delete.erl            |   77 -
 .../src/cpse_test_purge_bad_checkpoints.erl        |   80 -
 src/couch_pse_tests/src/cpse_test_purge_docs.erl   |  464 -----
 .../src/cpse_test_purge_replication.erl            |  215 --
 src/couch_pse_tests/src/cpse_test_purge_seqs.erl   |  129 --
 .../src/cpse_test_read_write_docs.erl              |  311 ---
 src/couch_pse_tests/src/cpse_test_ref_counting.erl |  113 --
 src/couch_pse_tests/src/cpse_util.erl              |  677 -------
 src/ddoc_cache/LICENSE                             |  202 --
 src/ddoc_cache/README.md                           |    4 -
 src/ddoc_cache/priv/stats_descriptions.cfg         |   12 -
 src/ddoc_cache/src/ddoc_cache.app.src              |   32 -
 src/ddoc_cache/src/ddoc_cache.erl                  |   60 -
 src/ddoc_cache/src/ddoc_cache.hrl                  |   40 -
 src/ddoc_cache/src/ddoc_cache_app.erl              |   25 -
 src/ddoc_cache/src/ddoc_cache_entry.erl            |  374 ----
 src/ddoc_cache/src/ddoc_cache_entry_custom.erl     |   37 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     |   46 -
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl |   47 -
 .../src/ddoc_cache_entry_validation_funs.erl       |   44 -
 src/ddoc_cache/src/ddoc_cache_lru.erl              |  333 ----
 src/ddoc_cache/src/ddoc_cache_opener.erl           |   66 -
 src/ddoc_cache/src/ddoc_cache_sup.erl              |   46 -
 src/ddoc_cache/src/ddoc_cache_value.erl            |   27 -
 .../test/eunit/ddoc_cache_basic_test.erl           |  175 --
 .../test/eunit/ddoc_cache_coverage_test.erl        |   77 -
 .../test/eunit/ddoc_cache_disabled_test.erl        |   62 -
 .../test/eunit/ddoc_cache_entry_test.erl           |  159 --
 src/ddoc_cache/test/eunit/ddoc_cache_ev.erl        |   21 -
 .../test/eunit/ddoc_cache_eviction_test.erl        |   96 -
 src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl  |  219 --
 .../test/eunit/ddoc_cache_no_cache_test.erl        |   87 -
 .../test/eunit/ddoc_cache_open_error_test.erl      |   46 -
 src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl |  107 -
 .../test/eunit/ddoc_cache_opener_test.erl          |   33 -
 .../test/eunit/ddoc_cache_refresh_test.erl         |  174 --
 .../test/eunit/ddoc_cache_remove_test.erl          |  224 ---
 src/ddoc_cache/test/eunit/ddoc_cache_test.hrl      |   26 -
 src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl     |  111 --
 src/dreyfus/.gitignore                             |    4 -
 src/dreyfus/LICENSE.txt                            |  202 --
 src/dreyfus/README.md                              |   78 -
 src/dreyfus/include/dreyfus.hrl                    |   74 -
 src/dreyfus/priv/stats_descriptions.cfg            |   65 -
 src/dreyfus/src/clouseau_rpc.erl                   |  109 -
 src/dreyfus/src/dreyfus.app.src                    |   22 -
 src/dreyfus/src/dreyfus_app.erl                    |   24 -
 src/dreyfus/src/dreyfus_bookmark.erl               |   90 -
 src/dreyfus/src/dreyfus_config.erl                 |   15 -
 src/dreyfus/src/dreyfus_epi.erl                    |   46 -
 src/dreyfus/src/dreyfus_fabric.erl                 |  205 --
 src/dreyfus/src/dreyfus_fabric_cleanup.erl         |   78 -
 src/dreyfus/src/dreyfus_fabric_group1.erl          |  129 --
 src/dreyfus/src/dreyfus_fabric_group2.erl          |  158 --
 src/dreyfus/src/dreyfus_fabric_info.erl            |  108 -
 src/dreyfus/src/dreyfus_fabric_search.erl          |  270 ---
 src/dreyfus/src/dreyfus_httpd.erl                  |  614 ------
 src/dreyfus/src/dreyfus_httpd_handlers.erl         |   29 -
 src/dreyfus/src/dreyfus_index.erl                  |  391 ----
 src/dreyfus/src/dreyfus_index_manager.erl          |  153 --
 src/dreyfus/src/dreyfus_index_updater.erl          |  181 --
 src/dreyfus/src/dreyfus_plugin_couch_db.erl        |   26 -
 src/dreyfus/src/dreyfus_rpc.erl                    |  130 --
 src/dreyfus/src/dreyfus_sup.erl                    |   32 -
 src/dreyfus/src/dreyfus_util.erl                   |  441 -----
 src/dreyfus/test/dreyfus_blacklist_await_test.erl  |   76 -
 .../test/dreyfus_blacklist_request_test.erl        |   96 -
 src/dreyfus/test/dreyfus_config_test.erl           |   71 -
 src/dreyfus/test/dreyfus_purge_test.erl            |  867 --------
 src/dreyfus/test/dreyfus_test_util.erl             |   13 -
 src/dreyfus/test/elixir/mix.exs                    |   30 -
 src/dreyfus/test/elixir/mix.lock                   |    5 -
 src/dreyfus/test/elixir/run                        |    4 -
 .../test/elixir/test/partition_search_test.exs     |  247 ---
 src/dreyfus/test/elixir/test/search_test.exs       |  226 ---
 src/dreyfus/test/elixir/test/test_helper.exs       |    4 -
 src/fabric/include/fabric.hrl                      |   46 -
 src/fabric/src/fabric.app.src                      |    2 -
 src/fabric/src/fabric.erl                          |  720 -------
 src/fabric/src/fabric_db_create.erl                |  228 ---
 src/fabric/src/fabric_db_delete.erl                |   98 -
 src/fabric/src/fabric_db_doc_count.erl             |   62 -
 src/fabric/src/fabric_db_info.erl                  |  171 --
 src/fabric/src/fabric_db_meta.erl                  |  198 --
 src/fabric/src/fabric_db_partition_info.erl        |  155 --
 src/fabric/src/fabric_db_update_listener.erl       |  177 --
 src/fabric/src/fabric_design_doc_count.erl         |   62 -
 src/fabric/src/fabric_dict.erl                     |   61 -
 src/fabric/src/fabric_doc_attachments.erl          |  160 --
 src/fabric/src/fabric_doc_atts.erl                 |  170 --
 src/fabric/src/fabric_doc_missing_revs.erl         |   97 -
 src/fabric/src/fabric_doc_open.erl                 |  610 ------
 src/fabric/src/fabric_doc_open_revs.erl            |  799 --------
 src/fabric/src/fabric_doc_purge.erl                |  571 ------
 src/fabric/src/fabric_doc_update.erl               |  377 ----
 src/fabric/src/fabric_group_info.erl               |  139 --
 src/fabric/src/fabric_ring.erl                     |  519 -----
 src/fabric/src/fabric_rpc.erl                      |  664 -------
 src/fabric/src/fabric_streams.erl                  |  274 ---
 src/fabric/src/fabric_util.erl                     |  347 ----
 src/fabric/src/fabric_view.erl                     |  478 -----
 src/fabric/src/fabric_view_all_docs.erl            |  332 ----
 src/fabric/src/fabric_view_changes.erl             |  820 --------
 src/fabric/src/fabric_view_map.erl                 |  267 ---
 src/fabric/src/fabric_view_reduce.erl              |  165 --
 src/fabric/test/eunit/fabric_rpc_tests.erl         |  181 --
 src/global_changes/.gitignore                      |    2 -
 src/global_changes/LICENSE                         |  203 --
 src/global_changes/README.md                       |   27 -
 src/global_changes/priv/stats_descriptions.cfg     |   20 -
 src/global_changes/src/global_changes.app.src      |   32 -
 src/global_changes/src/global_changes_app.erl      |   28 -
 src/global_changes/src/global_changes_epi.erl      |   51 -
 src/global_changes/src/global_changes_httpd.erl    |  285 ---
 .../src/global_changes_httpd_handlers.erl          |   28 -
 src/global_changes/src/global_changes_listener.erl |  165 --
 src/global_changes/src/global_changes_plugin.erl   |   40 -
 src/global_changes/src/global_changes_server.erl   |  229 ---
 src/global_changes/src/global_changes_sup.erl      |   84 -
 src/global_changes/src/global_changes_util.erl     |   27 -
 .../test/eunit/global_changes_hooks_tests.erl      |  156 --
 src/ioq/.gitignore                                 |    2 -
 src/ioq/src/ioq.app.src                            |   21 -
 src/ioq/src/ioq.erl                                |  189 --
 src/ioq/src/ioq_app.erl                            |   21 -
 src/ioq/src/ioq_sup.erl                            |   24 -
 src/ken/README.md                                  |   12 -
 src/ken/rebar.config.script                        |   28 -
 src/ken/src/ken.app.src.script                     |   38 -
 src/ken/src/ken.erl                                |   29 -
 src/ken/src/ken_app.erl                            |   28 -
 src/ken/src/ken_event_handler.erl                  |   56 -
 src/ken/src/ken_server.erl                         |  579 ------
 src/ken/src/ken_sup.erl                            |   33 -
 src/ken/test/config.ini                            |    2 -
 src/ken/test/ken_server_test.erl                   |   97 -
 src/mango/src/mango_cursor_text.erl                |  334 ----
 src/mango/src/mango_idx_text.erl                   |  459 -----
 src/mem3/LICENSE                                   |  202 --
 src/mem3/README.md                                 |   43 -
 src/mem3/README_reshard.md                         |   93 -
 src/mem3/include/mem3.hrl                          |   59 -
 src/mem3/priv/stats_descriptions.cfg               |   12 -
 src/mem3/rebar.config.script                       |   22 -
 src/mem3/src/mem3.app.src                          |   40 -
 src/mem3/src/mem3.erl                              |  424 ----
 src/mem3/src/mem3_app.erl                          |   21 -
 src/mem3/src/mem3_cluster.erl                      |  161 --
 src/mem3/src/mem3_epi.erl                          |   51 -
 src/mem3/src/mem3_hash.erl                         |   73 -
 src/mem3/src/mem3_httpd.erl                        |   84 -
 src/mem3/src/mem3_httpd_handlers.erl               |   61 -
 src/mem3/src/mem3_nodes.erl                        |  155 --
 src/mem3/src/mem3_plugin_couch_db.erl              |   21 -
 src/mem3/src/mem3_rep.erl                          |  998 ----------
 src/mem3/src/mem3_reshard.erl                      |  913 ---------
 src/mem3/src/mem3_reshard.hrl                      |   74 -
 src/mem3/src/mem3_reshard_api.erl                  |  217 --
 src/mem3/src/mem3_reshard_dbdoc.erl                |  274 ---
 src/mem3/src/mem3_reshard_httpd.erl                |  317 ---
 src/mem3/src/mem3_reshard_index.erl                |  164 --
 src/mem3/src/mem3_reshard_job.erl                  |  716 -------
 src/mem3/src/mem3_reshard_job_sup.erl              |   55 -
 src/mem3/src/mem3_reshard_store.erl                |  286 ---
 src/mem3/src/mem3_reshard_sup.erl                  |   47 -
 src/mem3/src/mem3_reshard_validate.erl             |  126 --
 src/mem3/src/mem3_rpc.erl                          |  711 -------
 src/mem3/src/mem3_seeds.erl                        |  162 --
 src/mem3/src/mem3_shards.erl                       |  766 -------
 src/mem3/src/mem3_sup.erl                          |   40 -
 src/mem3/src/mem3_sync.erl                         |  323 ---
 src/mem3/src/mem3_sync_event.erl                   |   86 -
 src/mem3/src/mem3_sync_event_listener.erl          |  353 ----
 src/mem3/src/mem3_sync_nodes.erl                   |  115 --
 src/mem3/src/mem3_sync_security.erl                |  117 --
 src/mem3/src/mem3_util.erl                         |  650 ------
 src/mem3/test/eunit/mem3_cluster_test.erl          |  133 --
 src/mem3/test/eunit/mem3_hash_test.erl             |   23 -
 src/mem3/test/eunit/mem3_rep_test.erl              |  321 ---
 src/mem3/test/eunit/mem3_reshard_api_test.erl      |  847 --------
 .../test/eunit/mem3_reshard_changes_feed_test.erl  |  389 ----
 src/mem3/test/eunit/mem3_reshard_test.erl          |  834 --------
 src/mem3/test/eunit/mem3_ring_prop_tests.erl       |  151 --
 src/mem3/test/eunit/mem3_seeds_test.erl            |   69 -
 src/mem3/test/eunit/mem3_sync_security_test.erl    |   54 -
 src/mem3/test/eunit/mem3_util_test.erl             |  130 --
 src/rexi/README.md                                 |   23 -
 src/rexi/include/rexi.hrl                          |   20 -
 src/rexi/priv/stats_descriptions.cfg               |   24 -
 src/rexi/rebar.config                              |    2 -
 src/rexi/src/rexi.app.src                          |   28 -
 src/rexi/src/rexi.erl                              |  320 ---
 src/rexi/src/rexi_app.erl                          |   22 -
 src/rexi/src/rexi_buffer.erl                       |  104 -
 src/rexi/src/rexi_monitor.erl                      |   65 -
 src/rexi/src/rexi_server.erl                       |  193 --
 src/rexi/src/rexi_server_mon.erl                   |  176 --
 src/rexi/src/rexi_server_sup.erl                   |   29 -
 src/rexi/src/rexi_sup.erl                          |   64 -
 src/rexi/src/rexi_utils.erl                        |  105 -
 src/setup/.gitignore                               |    4 -
 src/setup/LICENSE                                  |  203 --
 src/setup/README.md                                |  210 --
 src/setup/src/setup.app.src                        |   27 -
 src/setup/src/setup.erl                            |  386 ----
 src/setup/src/setup_app.erl                        |   28 -
 src/setup/src/setup_epi.erl                        |   49 -
 src/setup/src/setup_httpd.erl                      |  180 --
 src/setup/src/setup_httpd_handlers.erl             |   32 -
 src/setup/src/setup_sup.erl                        |   44 -
 src/setup/test/t-frontend-setup.sh                 |   71 -
 src/setup/test/t-single-node-auto-setup.sh         |   24 -
 src/setup/test/t-single-node.sh                    |   46 -
 src/setup/test/t.sh                                |   63 -
 src/smoosh/README.md                               |  140 --
 src/smoosh/operator_guide.md                       |  396 ----
 src/smoosh/src/smoosh.app.src                      |   29 -
 src/smoosh/src/smoosh.erl                          |   69 -
 src/smoosh/src/smoosh_app.erl                      |   28 -
 src/smoosh/src/smoosh_channel.erl                  |  325 ---
 src/smoosh/src/smoosh_priority_queue.erl           |   86 -
 src/smoosh/src/smoosh_server.erl                   |  606 ------
 src/smoosh/src/smoosh_sup.erl                      |   38 -
 src/smoosh/src/smoosh_utils.erl                    |   92 -
 src/smoosh/test/exunit/scheduling_window_test.exs  |   79 -
 src/smoosh/test/exunit/test_helper.exs             |    2 -
 371 files changed, 6 insertions(+), 75257 deletions(-)

diff --git a/emilio.config b/emilio.config
index 0dad938..84a6571 100644
--- a/emilio.config
+++ b/emilio.config
@@ -8,13 +8,11 @@
     "src[\/]emilio[\/]*",
     "src[\/]folsom[\/]*",
     "src[\/]mochiweb[\/]*",
-    "src[\/]snappy[\/]*",
     "src[\/]ssl_verify_fun[\/]*",
     "src[\/]ibrowse[\/]*",
     "src[\/]jiffy[\/]*",
     "src[\/]meck[\/]*",
     "src[\/]proper[\/]*",
     "src[\/]recon[\/]*",
-    "src[\/]hyper[\/]*",
-    "src[\/]triq[\/]*"
+    "src[\/]hyper[\/]*"
 ]}.
diff --git a/mix.exs b/mix.exs
index 9cba1a4..12e0221 100644
--- a/mix.exs
+++ b/mix.exs
@@ -133,16 +133,13 @@ defmodule CouchDBTest.Mixfile do
       "b64url",
       "bear",
       "mochiweb",
-      "snappy",
       "rebar",
       "proper",
       "mochiweb",
       "meck",
-      "khash",
       "hyper",
       "fauxton",
-      "folsom",
-      "hqueue"
+      "folsom"
     ]
 
     deps |> Enum.map(fn app -> "src/#{app}" end)
diff --git a/rebar.config.script b/rebar.config.script
index e33a9e7..0f40cf0 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -118,33 +118,18 @@ SubDirs = [
     "src/chttpd",
     "src/couch",
     "src/couch_eval",
-    "src/couch_event",
-    "src/mem3",
-    "src/couch_index",
-    "src/couch_mrview",
     "src/couch_js",
     "src/couch_replicator",
-    "src/couch_plugins",
-    "src/couch_pse_tests",
     "src/couch_stats",
-    "src/couch_peruser",
     "src/couch_tests",
     "src/couch_views",
     "src/ctrace",
-    "src/ddoc_cache",
-    "src/dreyfus",
     "src/fabric",
     "src/aegis",
     "src/couch_jobs",
     "src/couch_expiring_cache",
-    "src/global_changes",
-    "src/ioq",
     "src/jwtf",
-    "src/ken",
     "src/mango",
-    "src/rexi",
-    "src/setup",
-    "src/smoosh",
     "src/ebtree",
     "src/couch_prometheus",
     "rel"
@@ -156,8 +141,6 @@ DepDescs = [
 {b64url,           "b64url",           {tag, "1.0.2"}},
 {erlfdb,           "erlfdb",           {tag, "v1.3.3"}},
 {ets_lru,          "ets-lru",          {tag, "1.1.0"}},
-{khash,            "khash",            {tag, "1.1.0"}},
-{snappy,           "snappy",           {tag, "CouchDB-1.0.4"}},
 
 %% Non-Erlang deps
 {docs,             {url, "https://github.com/apache/couchdb-documentation"},
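
Each DepDescs entry above is shorthand for a full rebar dependency. As a
minimal sketch, such an entry presumably expands to a tuple of roughly the
following shape (the expansion helper lives elsewhere in this script; the
repository URL below is an assumption for illustration, not taken from this
diff):

    %% Assumed expansion of {b64url, "b64url", {tag, "1.0.2"}}:
    {b64url, ".*",
        {git, "https://github.com/apache/couchdb-b64url.git", {tag, "1.0.2"}}}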
diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config
index f9f49e1..db85ef1 100644
--- a/rel/apps/couch_epi.config
+++ b/rel/apps/couch_epi.config
@@ -14,12 +14,7 @@
     couch_db_epi,
     fabric2_epi,
     chttpd_epi,
-    couch_index_epi,
     couch_views_epi,
     couch_replicator_epi,
-    dreyfus_epi,
-    global_changes_epi,
-    mango_epi,
-    mem3_epi,
-    setup_epi
+    mango_epi
 ]}.
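
The names kept above are couch_epi plugin modules. For orientation, here is a
minimal sketch of the shape of such a plugin, with hypothetical module and
handler names; the callback set follows the couch_epi_plugin behaviour
implemented by the modules in this list:

    -module(example_epi).                      %% hypothetical name
    -behaviour(couch_epi_plugin).

    -export([app/0, providers/0, services/0, data_providers/0,
             data_subscriptions/0, processes/0, notify/3]).

    app() -> example.
    %% illustrative provider: register HTTP handlers with chttpd
    providers() -> [{chttpd_handlers, example_httpd_handlers}].
    services() -> [].
    data_providers() -> [].
    data_subscriptions() -> [].
    processes() -> [].
    notify(_Key, _OldData, _NewData) -> ok.

Dropping a module from this list keeps couch_epi from loading that plugin's
providers and services at startup.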
diff --git a/rel/reltool.config b/rel/reltool.config
index 9c59aa5..7d35993 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -35,42 +35,26 @@
         couch,
         couch_epi,
         couch_jobs,
-        couch_index,
         couch_log,
-        couch_mrview,
-        couch_plugins,
         couch_replicator,
         couch_stats,
         couch_eval,
         couch_js,
-        couch_event,
-        couch_peruser,
         couch_views,
-        ddoc_cache,
-        dreyfus,
         ebtree,
         erlfdb,
         ets_lru,
         fabric,
         folsom,
-        global_changes,
         hyper,
         ibrowse,
-        ioq,
         jaeger_passage,
         jiffy,
         jwtf,
-        ken,
-        khash,
         local,
         mango,
-        mem3,
         mochiweb,
         passage,
-        rexi,
-        setup,
-        smoosh,
-        snappy,
         thrift_protocol,
         couch_prometheus,
         %% extra
@@ -109,40 +93,24 @@
     {app, couch_eval, [{incl_cond, include}]},
     {app, couch_js, [{incl_cond, include}]},
     {app, couch_jobs, [{incl_cond, include}]},
-    {app, couch_index, [{incl_cond, include}]},
     {app, couch_log, [{incl_cond, include}]},
-    {app, couch_mrview, [{incl_cond, include}]},
-    {app, couch_plugins, [{incl_cond, include}]},
     {app, couch_replicator, [{incl_cond, include}]},
     {app, couch_stats, [{incl_cond, include}]},
-    {app, couch_event, [{incl_cond, include}]},
-    {app, couch_peruser, [{incl_cond, include}]},
     {app, couch_views, [{incl_cond, include}]},
-    {app, ddoc_cache, [{incl_cond, include}]},
-    {app, dreyfus, [{incl_cond, include}]},
     {app, erlfdb, [{incl_cond, include}]},
     {app, ebtree, [{incl_cond, include}]},
     {app, ets_lru, [{incl_cond, include}]},
     {app, fabric, [{incl_cond, include}]},
     {app, folsom, [{incl_cond, include}]},
-    {app, global_changes, [{incl_cond, include}]},
     {app, hyper, [{incl_cond, include}]},
     {app, ibrowse, [{incl_cond, include}]},
-    {app, ioq, [{incl_cond, include}]},
     {app, jaeger_passage, [{incl_cond, include}]},
     {app, jiffy, [{incl_cond, include}]},
     {app, jwtf, [{incl_cond, include}]},
-    {app, ken, [{incl_cond, include}]},
     {app, local, [{incl_cond, include}]},
-    {app, khash, [{incl_cond, include}]},
     {app, mango, [{incl_cond, include}]},
-    {app, mem3, [{incl_cond, include}]},
     {app, mochiweb, [{incl_cond, include}]},
     {app, passage, [{incl_cond, include}]},
-    {app, rexi, [{incl_cond, include}]},
-    {app, setup, [{incl_cond, include}]},
-    {app, smoosh, [{incl_cond, include}]},
-    {app, snappy, [{incl_cond, include}]},
     {app, thrift_protocol, [{incl_cond, include}]},
     {app, couch_prometheus, [{incl_cond, include}]},
 
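
Each remaining {app, Name, [{incl_cond, include}]} entry forces reltool to
bundle that application into the release, so deleting an entry (together with
its line in the boot list above) removes the application from the build. An
illustrative way to confirm a pruned application is gone from a running node,
assuming a remote shell on that node:

    %% Applications currently running on the node; removed apps such as
    %% mem3, rexi or smoosh should no longer appear in this list.
    [App || {App, _Desc, _Vsn} <- application:which_applications()].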
diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl
deleted file mode 100644
index 1c2c1f3..0000000
--- a/src/chttpd/src/chttpd_rewrite.erl
+++ /dev/null
@@ -1,487 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(chttpd_rewrite).
-
--compile(tuple_calls).
-
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-handle_rewrite_req(#httpd{}=Req, Db, DDoc) ->
-    RewritesSoFar = erlang:get(?REWRITE_COUNT),
-    MaxRewrites = config:get_integer("httpd", "rewrite_limit", 100),
-    case RewritesSoFar >= MaxRewrites of
-        true ->
-            throw({bad_request, <<"Exceeded rewrite recursion limit">>});
-        false ->
-            erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
-    end,
-    case get_rules(DDoc) of
-        Rules when is_list(Rules) ->
-            do_rewrite(Req, Rules);
-        Rules when is_binary(Rules) ->
-            case couch_query_servers:rewrite(Req, Db, DDoc) of
-                undefined ->
-                    chttpd:send_error(Req, 404, <<"rewrite_error">>,
-                        <<"Invalid path.">>);
-                Rewrite ->
-                    do_rewrite(Req, Rewrite)
-            end;
-        undefined ->
-            chttpd:send_error(Req, 404, <<"rewrite_error">>,
-                <<"Invalid path.">>)
-    end.
-
-
-get_rules(#doc{body={Props}}) ->
-    couch_util:get_value(<<"rewrites">>, Props).
-
-
-do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props) ->
-    case couch_util:get_value(<<"code">>, Props) of
-        undefined ->
-            Method = rewrite_method(Req, Rewrite),
-            Headers = rewrite_headers(Req, Rewrite),
-            Path = ?b2l(rewrite_path(Req, Rewrite)),
-            NewMochiReq = mochiweb_request:new(MochiReq:get(socket),
-                                               Method,
-                                               Path,
-                                               MochiReq:get(version),
-                                               Headers),
-            Body = case couch_util:get_value(<<"body">>, Props) of
-                undefined -> erlang:get(mochiweb_request_body);
-                B -> B
-            end,
-            NewMochiReq:cleanup(),
-            case Body of
-                undefined -> [];
-                _ -> erlang:put(mochiweb_request_body, Body)
-            end,
-            couch_log:debug("rewrite to ~p", [Path]),
-            chttpd:handle_request_int(NewMochiReq);
-        Code ->
-            chttpd:send_response(
-                Req,
-                Code,
-                case couch_util:get_value(<<"headers">>, Props) of
-                    undefined -> [];
-                    {H1} -> H1
-                end,
-                rewrite_body(Rewrite))
-    end;
-do_rewrite(#httpd{method=Method,
-                  path_parts=[_DbName, <<"_design">>, _DesignName, _Rewrite|PathParts],
-                  mochi_req=MochiReq}=Req,
-           Rules) when is_list(Rules) ->
-    % create dispatch list from rules
-    Prefix = path_prefix(Req),
-    QueryList = lists:map(fun decode_query_value/1, chttpd:qs(Req)),
-
-    DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
-    Method1 = couch_util:to_binary(Method),
-
-    %% get raw path by matching url to a rule.
-    RawPath = case try_bind_path(DispatchList, Method1,
-            PathParts, QueryList) of
-        no_dispatch_path ->
-            throw(not_found);
-        {NewPathParts, Bindings} ->
-            Parts = [quote_plus(X) || X <- NewPathParts],
-
-            % build the new path, re-encode query args, and optionally
-            % convert them to JSON
-            Bindings1 = maybe_encode_bindings(Bindings),
-            Path = iolist_to_binary([
-                string:join(Parts, [?SEPARATOR]),
-                [["?", mochiweb_util:urlencode(Bindings1)] || Bindings1 =/= []]
-            ]),
-
-            % if the path is relative, detect it and rewrite the path
-            safe_relative_path(Prefix, Path)
-        end,
-
-    % normalize the final path (resolve "." and ".." levels)
-    RawPath1 = ?b2l(normalize_path(RawPath)),
-
-    couch_log:debug("rewrite to ~p ~n", [RawPath1]),
-
-    % build a new mochiweb request
-    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                     MochiReq:get(method),
-                                     RawPath1,
-                                     MochiReq:get(version),
-                                     MochiReq:get(headers)),
-
-    % cleanup; this forces mochiweb to reparse the raw URI.
-    MochiReq1:cleanup(),
-
-    chttpd:handle_request_int(MochiReq1).
-
-
-rewrite_method(#httpd{method=Method}, {Props}) ->
-    DefaultMethod = couch_util:to_binary(Method),
-    couch_util:get_value(<<"method">>, Props, DefaultMethod).
-
-rewrite_path(#httpd{}=Req, {Props}=Rewrite) ->
-    Prefix = path_prefix(Req),
-    RewritePath = case couch_util:get_value(<<"path">>, Props) of
-        undefined ->
-            throw({<<"rewrite_error">>,
-                   <<"Rewrite result must produce a new path.">>});
-        P -> P
-    end,
-    SafeRelativePath = safe_relative_path(Prefix, RewritePath),
-    NormalizedPath = normalize_path(SafeRelativePath),
-    QueryParams = rewrite_query_params(Req, Rewrite),
-    case QueryParams of
-        <<"">> ->
-            NormalizedPath;
-        QueryParams ->
-            <<NormalizedPath/binary, "?", QueryParams/binary>>
-    end.
-
-rewrite_query_params(#httpd{}=Req, {Props}) ->
-    RequestQS = chttpd:qs(Req),
-    RewriteQS = case couch_util:get_value(<<"query">>, Props) of
-        undefined -> RequestQS;
-        {V} -> V
-    end,
-    RewriteQSEsc = [{chttpd:quote(K), chttpd:quote(V)} || {K, V} <- RewriteQS],
-    iolist_to_binary(string:join([[K, "=", V] || {K, V} <- RewriteQSEsc], "&")).
-
-rewrite_headers(#httpd{mochi_req=MochiReq}, {Props}) ->
-    case couch_util:get_value(<<"headers">>, Props) of
-        undefined ->
-            MochiReq:get(headers);
-        {H} ->
-            mochiweb_headers:enter_from_list(
-                lists:map(fun({Key, Val}) -> {?b2l(Key), ?b2l(Val)} end, H),
-                MochiReq:get(headers))
-    end.
-
-rewrite_body({Props}) ->
-    Body = case couch_util:get_value(<<"body">>, Props) of
-        undefined -> erlang:get(mochiweb_request_body);
-        B -> B
-    end,
-    case Body of
-        undefined ->
-            [];
-        _ ->
-            erlang:put(mochiweb_request_body, Body),
-            Body
-    end.
-
-
-path_prefix(#httpd{path_parts=[DbName, <<"_design">>, DesignName | _]}) ->
-    EscapedDesignName = ?l2b(couch_util:url_encode(DesignName)),
-    EscapedDbName = ?l2b(couch_util:url_encode(DbName)),
-    DesignId = <<"_design/", EscapedDesignName/binary>>,
-    <<"/", EscapedDbName/binary, "/", DesignId/binary>>.
-
-safe_relative_path(Prefix, Path) ->
-    case mochiweb_util:safe_relative_path(?b2l(Path)) of
-        undefined ->
-            <<Prefix/binary, "/", Path/binary>>;
-        V0 ->
-            V1 = ?l2b(V0),
-            <<Prefix/binary, "/", V1/binary>>
-    end.
-
-
-quote_plus({bind, X}) ->
-    mochiweb_util:quote_plus(X);
-quote_plus(X) ->
-    mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% a 404 not_found error is raised
-try_bind_path([], _Method, _PathParts, _QueryList) ->
-    no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
-    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
-    case bind_method(Method1, Method) of
-        true ->
-            case bind_path(PathParts1, PathParts, []) of
-                {ok, Remaining, Bindings} ->
-                    Bindings1 = Bindings ++ QueryList,
-                    % parse query args from the rule and optionally
-                    % fill them in with binding vars
-                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
-                        Formats, []),
-                    % drop bindings in Bindings1 that already appear
-                    % in QueryArgs1
-                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
-                        K1 = to_binding(K),
-                        KV = case couch_util:get_value(K1, QueryArgs1) of
-                            undefined -> [{K1, V}];
-                            _V1 -> []
-                        end,
-                        Acc ++ KV
-                    end, [], Bindings1),
-
-                    FinalBindings = Bindings2 ++ QueryArgs1,
-                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
-                                    Remaining, []),
-                    {NewPathParts, FinalBindings};
-                fail ->
-                    try_bind_path(Rest, Method, PathParts, QueryList)
-            end;
-        false ->
-            try_bind_path(Rest, Method, PathParts, QueryList)
-    end.
-
-%% Dynamically rewrite the query list given as the query member in
-%% rewrites. Each value is replaced by a binding or by an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
-    Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
-    Value1 = {Value},
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
-    get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
-    get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
-    Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
-    lists:reverse(lists:foldl(fun
-                (<<":", Var/binary>>=Value1, Acc) ->
-                    [get_var(Var, Bindings, Value1, Formats)|Acc];
-                (Value1, Acc) ->
-                    [Value1|Acc]
-            end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
-    Value.
-
-maybe_json(Key, Value) ->
-    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
-                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
-        true ->
-            ?JSON_ENCODE(Value);
-        false ->
-            Value
-    end.
-
-get_var(VarName, Props, Default, Formats) ->
-    VarName1 = to_binding(VarName),
-    Val = couch_util:get_value(VarName1, Props, Default),
-    maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
-    case couch_util:get_value(VarName, Formats) of
-        undefined ->
-             Value;
-        Format ->
-            format(Format, Value)
-    end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
-    Value;
-format(<<"int">>, Value) when is_binary(Value) ->
-    format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
-    case (catch list_to_integer(Value)) of
-        IntVal when is_integer(IntVal) ->
-            IntVal;
-        _ ->
-            Value
-    end;
-format(<<"bool">>, Value) when is_binary(Value) ->
-    format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
-    case string:to_lower(Value) of
-        "true" -> true;
-        "false" -> false;
-        _ -> Value
-    end;
-format(_Format, Value) ->
-   Value.
-
-%% @doc Build the new path from bindings. Bindings are query args
-%% (+ the dynamic query, rewritten if needed) and bindings found in
-%% the bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined -> << "undefined">>;
-        P1 ->
-            iolist_to_binary(P1)
-    end,
-    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
-    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check whether the request method fits the rule method. If the
-%% method rule is '*', which is the default, any
-%% request method will bind. This allows us to make rules
-%% that depend on the HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
-    true;
-bind_method({bind, Method}, Method) ->
-    true;
-bind_method(_, _) ->
-    false.
-
-
-%% @doc Bind a path. Using the rule's from parts, we try to bind the
-%% variables given in the current URL by pattern matching
-bind_path([], [], Bindings) ->
-    {ok, [], Bindings};
-bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
-    {ok, Rest, Bindings};
-bind_path(_, [], _) ->
-    fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
-    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
-    bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
-    fail.
-
-
-%% normalize path.
-normalize_path(Path) when is_binary(Path)->
-    normalize_path(?b2l(Path));
-normalize_path(Path) when is_list(Path)->
-    Segments = normalize_path1(string:tokens(Path, "/"), []),
-    NormalizedPath = string:join(Segments, [?SEPARATOR]),
-    iolist_to_binary(["/", NormalizedPath]).
-
-
-normalize_path1([], Acc) ->
-    lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
-    Acc1 = case Acc of
-        [] -> [".."|Acc];
-        [T|_] when T =:= ".." -> [".."|Acc];
-        [_|R] -> R
-    end,
-    normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
-    normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
-    normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc transform json rule in erlang for pattern matching
-make_rule(Rule) ->
-    Method = case couch_util:get_value(<<"method">>, Rule) of
-        undefined -> ?MATCH_ALL;
-        M -> to_binding(M)
-    end,
-    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
-        undefined -> [];
-        {Args} -> Args
-        end,
-    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
-        undefined -> [?MATCH_ALL];
-        From ->
-            parse_path(From)
-        end,
-    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
-        undefined ->
-            throw({error, invalid_rewrite_target});
-        To ->
-            parse_path(To)
-        end,
-    Formats = case couch_util:get_value(<<"formats">>, Rule) of
-        undefined -> [];
-        {Fmts} -> Fmts
-    end,
-    [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
-    {ok, SlashRE} = re:compile(<<"\\/">>),
-    path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc Convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% to binding tuples.
-path_to_list([], Acc, _DotDotCount) ->
-    lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
-    path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
-    case config:get("httpd", "secure_rewrites", "true") of
-    "false" ->
-        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-    _Else ->
-        couch_log:notice("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
-        throw({insecure_rewrite_rule, "too many ../.. segments"})
-    end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
-    P1 = case P of
-        <<":", Var/binary>> ->
-            to_binding(Var);
-        _ -> P
-    end,
-    path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
-    [];
-maybe_encode_bindings(Props) ->
-    lists:foldl(fun
-            ({{bind, <<"*">>}, _V}, Acc) ->
-                Acc;
-            ({{bind, K}, V}, Acc) ->
-                V1 = iolist_to_binary(maybe_json(K, V)),
-                [{K, V1}|Acc]
-        end, [], Props).
-
-decode_query_value({K,V}) ->
-    case lists:member(K, ["key", "startkey", "start_key",
-                "endkey", "end_key", "keys"]) of
-        true ->
-            {to_binding(K), ?JSON_DECODE(V)};
-        false ->
-            {to_binding(K), ?l2b(V)}
-    end.
-
-to_binding({bind, V}) ->
-    {bind, V};
-to_binding(V) when is_list(V) ->
-    to_binding(?l2b(V));
-to_binding(V) ->
-    {bind, V}.
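
For context on the rewriter removed above: it read the rewrites member of a
design document and, when that member was a list, make_rule/1 compiled each
rule into [{FromParts, Method}, ToParts, QueryArgs, Formats]. A minimal sketch
of one rule in the EJSON form the module operated on (the paths are
hypothetical):

    %% EJSON for {"rewrites": [{"from":   "/a/:var",
    %%                          "to":     "/some/:var",
    %%                          "method": "GET"}]}
    {[{<<"rewrites">>, [
        {[{<<"from">>,   <<"/a/:var">>},
          {<<"to">>,     <<"/some/:var">>},
          {<<"method">>, <<"GET">>}]}
    ]}]}

parse_path/1 turned "/a/:var" into [<<"a">>, {bind, <<"var">>}], and those
{bind, _} tokens are what bind_path/3 matched against the request's path
segments.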
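To make the matching primitives concrete, two evaluations worked out by hand
from the deleted code (sketches, not captured test output):

    %% bind_path/3 binds {bind, ...} tokens to URL segments:
    %% bind_path([{bind, <<"var">>}], [<<"x">>], [])
    %%     -> {ok, [], [{{bind, <<"var">>}, <<"x">>}]}
    %%
    %% normalize_path/1 resolves "." and ".." levels:
    %% normalize_path(<<"/a/b/../c/./d">>) -> <<"/a/c/d">>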
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 74674bb..e411b5e 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -14,15 +14,12 @@
     {description, "Apache CouchDB"},
     {vsn, git},
     {registered, [
-        couch_db_update,
-        couch_db_update_notifier_sup,
         couch_httpd,
         couch_primary_services,
         couch_proc_manager,
         couch_secondary_services,
         couch_server,
-        couch_sup,
-        couch_task_status
+        couch_sup
     ]},
     {mod, {couch_app, []}},
     {applications, [
@@ -42,8 +39,6 @@
         couch_epi,
         b64url,
         couch_log,
-        couch_event,
-        ioq,
         couch_stats,
         hyper,
         couch_prometheus
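
The registered list declares the process names this application registers;
the entries dropped here belong to processes that no longer exist after this
cleanup. An illustrative one-liner to confirm a pruned name is no longer
registered on a live node:

    %% registered/0 is a BIF returning all locally registered names.
    false = lists:member(couch_task_status, registered()).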
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
deleted file mode 100644
index 48e751a..0000000
--- a/src/couch/src/couch_bt_engine.erl
+++ /dev/null
@@ -1,1246 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine).
--behavior(couch_db_engine).
-
--export([
-    exists/1,
-
-    delete/3,
-    delete_compaction_files/3,
-
-    init/2,
-    terminate/2,
-    handle_db_updater_call/2,
-    handle_db_updater_info/2,
-
-    incref/1,
-    decref/1,
-    monitored_by/1,
-
-    last_activity/1,
-
-    get_compacted_seq/1,
-    get_del_doc_count/1,
-    get_disk_version/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_props/1,
-    get_size_info/1,
-    get_partition_info/2,
-    get_update_seq/1,
-    get_uuid/1,
-
-    set_revs_limit/2,
-    set_purge_infos_limit/2,
-    set_security/2,
-    set_props/2,
-
-    set_update_seq/2,
-
-    open_docs/2,
-    open_local_docs/2,
-    read_doc_body/2,
-    load_purge_infos/2,
-
-    serialize_doc/2,
-    write_doc_body/2,
-    write_doc_infos/3,
-    purge_docs/3,
-    copy_purge_infos/2,
-
-    commit_data/1,
-
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_changes/5,
-    fold_purge_infos/5,
-    count_changes_since/2,
-
-    start_compaction/4,
-    finish_compaction/4
-]).
-
-
--export([
-    init_state/4
-]).
-
-
--export([
-    id_tree_split/1,
-    id_tree_join/2,
-    id_tree_reduce/2,
-
-    seq_tree_split/1,
-    seq_tree_join/2,
-    seq_tree_reduce/2,
-
-    local_tree_split/1,
-    local_tree_join/2,
-
-    purge_tree_split/1,
-    purge_tree_join/2,
-    purge_tree_reduce/2,
-    purge_seq_tree_split/1,
-    purge_seq_tree_join/2
-]).
-
-
-% Used by the compactor
--export([
-    update_header/2,
-    copy_security/2,
-    copy_props/2
-]).
-
-
--include_lib("kernel/include/file.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
-exists(FilePath) ->
-    case is_file(FilePath) of
-        true ->
-            true;
-        false ->
-            is_file(FilePath ++ ".compact")
-    end.
-
-
-delete(RootDir, FilePath, Async) ->
-    %% Delete any leftover compaction files. If we don't do this, a
-    %% subsequent request for this DB will try to open them and use
-    %% them for recovery.
-    delete_compaction_files(RootDir, FilePath, [{context, compaction}]),
-
-    % Delete the actual database file
-    couch_file:delete(RootDir, FilePath, Async).
-
-
-delete_compaction_files(RootDir, FilePath, DelOpts) ->
-    lists:foreach(fun(Ext) ->
-        couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
-    end, [".compact", ".compact.data", ".compact.meta"]).
-
-
-init(FilePath, Options) ->
-    {ok, Fd} = open_db_file(FilePath, Options),
-    Header = case lists:member(create, Options) of
-        true ->
-            delete_compaction_files(FilePath),
-            Header0 = couch_bt_engine_header:new(),
-            Header1 = init_set_props(Fd, Header0, Options),
-            ok = couch_file:write_header(Fd, Header1),
-            Header1;
-        false ->
-            case couch_file:read_header(Fd) of
-                {ok, Header0} ->
-                    Header0;
-                no_valid_header ->
-                    delete_compaction_files(FilePath),
-                    Header0 =  couch_bt_engine_header:new(),
-                    ok = couch_file:write_header(Fd, Header0),
-                    Header0
-            end
-    end,
-    {ok, init_state(FilePath, Fd, Header, Options)}.
-
-
-terminate(_Reason, St) ->
-    % If we died because our fd disappeared,
-    % we don't need to try closing it again.
-    Ref = St#st.fd_monitor,
-    if Ref == closed -> ok; true ->
-        ok = couch_file:close(St#st.fd),
-        receive
-            {'DOWN', Ref, _,  _, _} ->
-                ok
-            after 500 ->
-                ok
-        end
-    end,
-    couch_util:shutdown_sync(St#st.fd),
-    ok.
-
-
-handle_db_updater_call(Msg, St) ->
-    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor=Ref} = St) ->
-    {stop, normal, St#st{fd=undefined, fd_monitor=closed}}.
-
-
-incref(St) ->
-    {ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}.
-
-
-decref(St) ->
-    true = erlang:demonitor(St#st.fd_monitor, [flush]),
-    ok.
-
-
-monitored_by(St) ->
-    case erlang:process_info(St#st.fd, monitored_by) of
-        {monitored_by, Pids} ->
-            lists:filter(fun is_pid/1, Pids);
-        _ ->
-            []
-    end.
-
-
-last_activity(#st{fd = Fd}) ->
-    couch_file:last_read(Fd).
-
-
-get_compacted_seq(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, compacted_seq).
-
-
-get_del_doc_count(#st{} = St) ->
-    {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
-    element(2, Reds).
-
-
-get_disk_version(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, disk_version).
-
-
-get_doc_count(#st{} = St) ->
-    {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
-    element(1, Reds).
-
-
-get_epochs(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, epochs).
-
-
-get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
-    Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
-        {stop, PurgeSeq}
-    end,
-    {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, [{dir, rev}]),
-    PurgeSeq.
-
-
-get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
-    Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
-        {stop, PurgeSeq}
-    end,
-    {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, []),
-    PurgeSeq.
-
-
-get_purge_infos_limit(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, purge_infos_limit).
-
-
-get_revs_limit(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, revs_limit).
-
-
-get_size_info(#st{} = St) ->
-    {ok, FileSize} = couch_file:bytes(St#st.fd),
-    {ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree),
-    SizeInfo0 = element(3, DbReduction),
-    SizeInfo = case SizeInfo0 of
-        SI when is_record(SI, size_info) ->
-            SI;
-        {AS, ES} ->
-            #size_info{active=AS, external=ES};
-        AS ->
-            #size_info{active=AS}
-    end,
-    ActiveSize = active_size(St, SizeInfo),
-    ExternalSize = SizeInfo#size_info.external,
-    [
-        {active, ActiveSize},
-        {external, ExternalSize},
-        {file, FileSize}
-    ].
-
-
-partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, SizesAcc}) ->
-    case couch_partition:is_member(Key, Partition) of
-        true ->
-            {skip, {Partition, DC + DCAcc, DDC + DDCAcc, reduce_sizes(Sizes, SizesAcc)}};
-        false ->
-            {ok, {Partition, DCAcc, DDCAcc, SizesAcc}}
-    end;
-
-partition_size_cb(visit, FDI, _PrevReds, {Partition, DCAcc, DDCAcc, Acc}) ->
-    InPartition = couch_partition:is_member(FDI#full_doc_info.id, Partition),
-    Deleted = FDI#full_doc_info.deleted,
-    case {InPartition, Deleted} of
-        {true, true} ->
-            {ok, {Partition, DCAcc, DDCAcc + 1,
-                reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
-        {true, false} ->
-            {ok, {Partition, DCAcc + 1, DDCAcc,
-                reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
-        {false, _} ->
-            {ok, {Partition, DCAcc, DDCAcc, Acc}}
-    end.
-
-
-get_partition_info(#st{} = St, Partition) ->
-    StartKey = couch_partition:start_key(Partition),
-    EndKey = couch_partition:end_key(Partition),
-    Fun = fun partition_size_cb/4,
-    InitAcc = {Partition, 0, 0, #size_info{}},
-    Options = [{start_key, StartKey}, {end_key, EndKey}],
-    {ok, _, OutAcc} = couch_btree:fold(St#st.id_tree, Fun, InitAcc, Options),
-    {Partition, DocCount, DocDelCount, SizeInfo} = OutAcc,
-    [
-        {partition, Partition},
-        {doc_count, DocCount},
-        {doc_del_count, DocDelCount},
-        {sizes, [
-            {active, SizeInfo#size_info.active},
-            {external, SizeInfo#size_info.external}
-        ]}
-    ].
-
-
-get_security(#st{header = Header} = St) ->
-    case couch_bt_engine_header:get(Header, security_ptr) of
-        undefined ->
-            [];
-        Pointer ->
-            {ok, SecProps} = couch_file:pread_term(St#st.fd, Pointer),
-            SecProps
-    end.
-
-
-get_props(#st{header = Header} = St) ->
-    case couch_bt_engine_header:get(Header, props_ptr) of
-        undefined ->
-            [];
-        Pointer ->
-            {ok, Props} = couch_file:pread_term(St#st.fd, Pointer),
-            Props
-    end.
-
-
-get_update_seq(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, update_seq).
-
-
-get_uuid(#st{header = Header}) ->
-    couch_bt_engine_header:get(Header, uuid).
-
-
-set_revs_limit(#st{header = Header} = St, RevsLimit) ->
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {revs_limit, RevsLimit}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) ->
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {purge_infos_limit, PurgeInfosLimit}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_security(#st{header = Header} = St, NewSecurity) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options),
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {security_ptr, Ptr}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-set_props(#st{header = Header} = St, Props) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
-    NewSt = St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {props_ptr, Ptr}
-        ]),
-        needs_commit = true
-    },
-    {ok, increment_update_seq(NewSt)}.
-
-
-open_docs(#st{} = St, DocIds) ->
-    Results = couch_btree:lookup(St#st.id_tree, DocIds),
-    lists:map(fun
-        ({ok, FDI}) -> FDI;
-        (not_found) -> not_found
-    end, Results).
-
-
-open_local_docs(#st{} = St, DocIds) ->
-    Results = couch_btree:lookup(St#st.local_tree, DocIds),
-    lists:map(fun
-        ({ok, Doc}) -> Doc;
-        (not_found) -> not_found
-    end, Results).
-
-
-read_doc_body(#st{} = St, #doc{} = Doc) ->
-    {ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body),
-    Doc#doc{
-        body = Body,
-        atts = Atts
-    }.
-
-
-load_purge_infos(St, UUIDs) ->
-    Results = couch_btree:lookup(St#st.purge_tree, UUIDs),
-    lists:map(fun
-        ({ok, Info}) -> Info;
-        (not_found) -> not_found
-    end, Results).
-
-
-serialize_doc(#st{} = St, #doc{} = Doc) ->
-    Compress = fun(Term) ->
-        case couch_compress:is_compressed(Term, St#st.compression) of
-            true -> Term;
-            false -> couch_compress:compress(Term, St#st.compression)
-        end
-    end,
-    Body = Compress(Doc#doc.body),
-    Atts = Compress(Doc#doc.atts),
-    SummaryBin = ?term_to_bin({Body, Atts}),
-    Md5 = couch_hash:md5_hash(SummaryBin),
-    Data = couch_file:assemble_file_chunk(SummaryBin, Md5),
-    % TODO: This is a terrible hack to get around the issues
-    %       in COUCHDB-3255. We'll need to come back and figure
-    %       out a better approach to handling the case when we
-    %       need to generate a new revision id after the doc
-    %       has been serialized.
-    Doc#doc{
-        body = Data,
-        meta = [{comp_body, Body} | Doc#doc.meta]
-    }.
-
-
-write_doc_body(St, #doc{} = Doc) ->
-    #st{
-        fd = Fd
-    } = St,
-    {ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body),
-    {ok, Doc#doc{body = Ptr}, Written}.
-
-
-write_doc_infos(#st{} = St, Pairs, LocalDocs) ->
-    #st{
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        local_tree = LocalTree
-    } = St,
-    FinalAcc = lists:foldl(fun({OldFDI, NewFDI}, Acc) ->
-        {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
-        case {OldFDI, NewFDI} of
-            {not_found, #full_doc_info{}} ->
-                {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
-            {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
-                NewAddAcc = [NewFDI | AddAcc],
-                NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
-                {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
-            {#full_doc_info{id = Id}, not_found} ->
-                NewRemIdsAcc = [Id | RemIdsAcc],
-                NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
-                {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
-        end
-    end, {[], [], []}, Pairs),
-
-    {Add, RemIds, RemSeqs} = FinalAcc,
-    {ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds),
-    {ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs),
-
-    {AddLDocs, RemLDocIds} = lists:foldl(fun(Doc, {AddAcc, RemAcc}) ->
-        case Doc#doc.deleted of
-            true ->
-                {AddAcc, [Doc#doc.id | RemAcc]};
-            false ->
-                {[Doc | AddAcc], RemAcc}
-        end
-    end, {[], []}, LocalDocs),
-    {ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds),
-
-    NewUpdateSeq = lists:foldl(fun(#full_doc_info{update_seq=Seq}, Acc) ->
-        erlang:max(Seq, Acc)
-    end, get_update_seq(St), Add),
-
-    NewHeader = couch_bt_engine_header:set(St#st.header, [
-        {update_seq, NewUpdateSeq}
-    ]),
-
-    {ok, St#st{
-        header = NewHeader,
-        id_tree = IdTree2,
-        seq_tree = SeqTree2,
-        local_tree = LocalTree2,
-        needs_commit = true
-    }}.
-
-
-purge_docs(#st{} = St, Pairs, PurgeInfos) ->
-    #st{
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    } = St,
-
-    RemDocIds = [Old#full_doc_info.id || {Old, not_found} <- Pairs],
-    RemSeqs = [Old#full_doc_info.update_seq || {Old, _} <- Pairs],
-    DocsToAdd = [New || {_, New} <- Pairs, New /= not_found],
-    CurrSeq = couch_bt_engine_header:get(St#st.header, update_seq),
-    Seqs = [FDI#full_doc_info.update_seq || FDI <- DocsToAdd],
-    NewSeq = lists:max([CurrSeq | Seqs]),
-
-    % We bump the update seq because we have to ensure that
-    % indexers see that they need to process the new purge
-    % information.
-    UpdateSeq = case NewSeq == CurrSeq of
-        true -> CurrSeq + 1;
-        false -> NewSeq
-    end,
-    Header = couch_bt_engine_header:set(St#st.header, [
-        {update_seq, UpdateSeq}
-    ]),
-
-    {ok, IdTree2} = couch_btree:add_remove(IdTree, DocsToAdd, RemDocIds),
-    {ok, SeqTree2} = couch_btree:add_remove(SeqTree, DocsToAdd, RemSeqs),
-    {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
-    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
-    {ok, St#st{
-        header = Header,
-        id_tree = IdTree2,
-        seq_tree = SeqTree2,
-        purge_tree = PurgeTree2,
-        purge_seq_tree = PurgeSeqTree2,
-        needs_commit = true
-    }}.
-
-
-copy_purge_infos(#st{} = St, PurgeInfos) ->
-    #st{
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    } = St,
-    {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
-    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
-    {ok, St#st{
-       purge_tree = PurgeTree2,
-       purge_seq_tree = PurgeSeqTree2,
-       needs_commit = true
-    }}.
-
-
-commit_data(St) ->
-    #st{
-        fd = Fd,
-        header = OldHeader,
-        needs_commit = NeedsCommit
-    } = St,
-
-    NewHeader = update_header(St, OldHeader),
-
-    case NewHeader /= OldHeader orelse NeedsCommit of
-        true ->
-            couch_file:sync(Fd),
-            ok = couch_file:write_header(Fd, NewHeader),
-            couch_file:sync(Fd),
-            {ok, St#st{
-                header = NewHeader,
-                needs_commit = false
-            }};
-        false ->
-            {ok, St}
-    end.
-
-
-open_write_stream(#st{} = St, Options) ->
-    couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options).
-
-
-open_read_stream(#st{} = St, StreamSt) ->
-    {ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}.
-
-
-is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) ->
-    St#st.fd == Fd;
-is_active_stream(_, _) ->
-    false.
-
-
-fold_docs(St, UserFun, UserAcc, Options) ->
-    fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options).
-
-
-fold_local_docs(St, UserFun, UserAcc, Options) ->
-    case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of
-        {ok, _Reds, FinalAcc} -> {ok, null, FinalAcc};
-        {ok, FinalAcc} -> {ok, FinalAcc}
-    end.
-
-
-fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
-    Fun = fun drop_reductions/4,
-    InAcc = {UserFun, UserAcc},
-    Opts = [{start_key, SinceSeq + 1}] ++ Options,
-    {ok, _, OutAcc} = couch_btree:fold(St#st.seq_tree, Fun, InAcc, Opts),
-    {_, FinalUserAcc} = OutAcc,
-    {ok, FinalUserAcc}.
-
-
-fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) ->
-    PurgeSeqTree = St#st.purge_seq_tree,
-    StartSeq = StartSeq0 + 1,
-    MinSeq = get_oldest_purge_seq(St),
-    if MinSeq =< StartSeq -> ok; true ->
-        erlang:error({invalid_start_purge_seq, StartSeq0})
-    end,
-    Wrapper = fun(Info, _Reds, UAcc) ->
-        UserFun(Info, UAcc)
-    end,
-    Opts = [{start_key, StartSeq}] ++ Options,
-    {ok, _, OutAcc} = couch_btree:fold(PurgeSeqTree, Wrapper, UserAcc, Opts),
-    {ok, OutAcc}.
-
-
-count_changes_since(St, SinceSeq) ->
-    BTree = St#st.seq_tree,
-    FoldFun = fun(_SeqStart, PartialReds, 0) ->
-        {ok, couch_btree:final_reduce(BTree, PartialReds)}
-    end,
-    Opts = [{start_key, SinceSeq + 1}],
-    {ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts),
-    Changes.
-
-
-start_compaction(St, DbName, Options, Parent) ->
-    Args = [St, DbName, Options, Parent],
-    Pid = spawn_link(couch_bt_engine_compactor, start, Args),
-    {ok, St, Pid}.
-
-
-finish_compaction(OldState, DbName, Options, CompactFilePath) ->
-    {ok, NewState1} = ?MODULE:init(CompactFilePath, Options),
-    OldSeq = get_update_seq(OldState),
-    NewSeq = get_update_seq(NewState1),
-    case OldSeq == NewSeq of
-        true ->
-            finish_compaction_int(OldState, NewState1);
-        false ->
-            couch_log:info("Compaction file still behind main file "
-                           "(update seq=~p. compact update seq=~p). Retrying.",
-                           [OldSeq, NewSeq]),
-            ok = decref(NewState1),
-            start_compaction(OldState, DbName, Options, self())
-    end.
-
-
-id_tree_split(#full_doc_info{}=Info) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = Deleted,
-        sizes = SizeInfo,
-        rev_tree = Tree
-    } = Info,
-    {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-id_tree_join(Id, {HighSeq, Deleted, DiskTree}) ->
-    % Handle old formats before data_size was added
-    id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
-
-id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = HighSeq,
-        deleted = ?i2b(Deleted),
-        sizes = couch_db_updater:upgrade_sizes(Sizes),
-        rev_tree = rev_tree(DiskTree)
-    }.
-
-
-id_tree_reduce(reduce, FullDocInfos) ->
-    lists:foldl(fun(Info, {NotDeleted, Deleted, Sizes}) ->
-        Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
-        case Info#full_doc_info.deleted of
-        true ->
-            {NotDeleted, Deleted + 1, Sizes2};
-        false ->
-            {NotDeleted + 1, Deleted, Sizes2}
-        end
-    end, {0, 0, #size_info{}}, FullDocInfos);
-id_tree_reduce(rereduce, Reds) ->
-    lists:foldl(fun
-        ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
-            % pre 1.2 format, will be upgraded on compaction
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
-        ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
-            AccSizes2 = reduce_sizes(AccSizes, Sizes),
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
-    end, {0, 0, #size_info{}}, Reds).
-
-
-seq_tree_split(#full_doc_info{}=Info) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = Del,
-        sizes = SizeInfo,
-        rev_tree = Tree
-    } = Info,
-    {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
-    seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree});
-
-seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = ?i2b(Del),
-        sizes = join_sizes(Sizes),
-        rev_tree = rev_tree(DiskTree)
-    };
-
-seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
-    % Older versions stored #doc_info records in the seq_tree.
-    % Compact to upgrade.
-    Revs = lists:map(fun({Rev, Seq, Bp}) ->
-        #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
-    end, RevInfos),
-    DeletedRevs = lists:map(fun({Rev, Seq, Bp}) ->
-        #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
-    end, DeletedRevInfos),
-    #doc_info{
-        id = Id,
-        high_seq = KeySeq,
-        revs = Revs ++ DeletedRevs
-    }.
-
-
-seq_tree_reduce(reduce, DocInfos) ->
-    % count the number of documents
-    length(DocInfos);
-seq_tree_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) ->
-    #doc{
-        id = Id,
-        body = BodyData
-    } = Doc,
-    {Id, {binary_to_integer(Rev), BodyData}};
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
-    #doc{
-        id = Id,
-        body = BodyData
-    } = Doc,
-    {Id, {Rev, BodyData}}.
-
-
-local_tree_join(Id, {Rev, BodyData}) when is_binary(Rev) ->
-    #doc{
-        id = Id,
-        revs = {0, [Rev]},
-        body = BodyData
-    };
-
-local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
-    #doc{
-        id = Id,
-        revs = {0, [integer_to_binary(Rev)]},
-        body = BodyData
-    }.
-
-
-purge_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
-    {UUID, {PurgeSeq, DocId, Revs}}.
-
-
-purge_tree_join(UUID, {PurgeSeq, DocId, Revs}) ->
-    {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_seq_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
-    {PurgeSeq, {UUID, DocId, Revs}}.
-
-
-purge_seq_tree_join(PurgeSeq, {UUID, DocId, Revs}) ->
-    {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_tree_reduce(reduce, IdRevs) ->
-    % count the number of purge requests
-    length(IdRevs);
-purge_tree_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-
-set_update_seq(#st{header = Header} = St, UpdateSeq) ->
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {update_seq, UpdateSeq}
-        ]),
-        needs_commit = true
-    }}.
-
-
-copy_security(#st{header = Header} = St, SecProps) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options),
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {security_ptr, Ptr}
-        ]),
-        needs_commit = true
-    }}.
-
-
-copy_props(#st{header = Header} = St, Props) ->
-    Options = [{compression, St#st.compression}],
-    {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
-    {ok, St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {props_ptr, Ptr}
-        ]),
-        needs_commit = true
-    }}.
-
-
-open_db_file(FilePath, Options) ->
-    case couch_file:open(FilePath, Options) of
-        {ok, Fd} ->
-            {ok, Fd};
-        {error, enoent} ->
-            % Couldn't find the file. Is there a compact version? This can
-            % happen (rarely) if we crashed during the file switch.
-            case couch_file:open(FilePath ++ ".compact", [nologifmissing]) of
-                {ok, Fd} ->
-                    Fmt = "Recovering from compaction file: ~s~s",
-                    couch_log:info(Fmt, [FilePath, ".compact"]),
-                    ok = file:rename(FilePath ++ ".compact", FilePath),
-                    ok = couch_file:sync(Fd),
-                    {ok, Fd};
-                {error, enoent} ->
-                    throw({not_found, no_db_file})
-            end;
-        Error ->
-            throw(Error)
-    end.
-
-
-init_state(FilePath, Fd, Header0, Options) ->
-    ok = couch_file:sync(Fd),
-
-    Compression = couch_compress:get_compression_method(),
-
-    Header1 = couch_bt_engine_header:upgrade(Header0),
-    Header2 = set_default_security_object(Fd, Header1, Compression, Options),
-    Header = upgrade_purge_info(Fd, Header2),
-
-    IdTreeState = couch_bt_engine_header:id_tree_state(Header),
-    {ok, IdTree} = couch_btree:open(IdTreeState, Fd, [
-            {split, fun ?MODULE:id_tree_split/1},
-            {join, fun ?MODULE:id_tree_join/2},
-            {reduce, fun ?MODULE:id_tree_reduce/2},
-            {compression, Compression}
-        ]),
-
-    SeqTreeState = couch_bt_engine_header:seq_tree_state(Header),
-    {ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [
-            {split, fun ?MODULE:seq_tree_split/1},
-            {join, fun ?MODULE:seq_tree_join/2},
-            {reduce, fun ?MODULE:seq_tree_reduce/2},
-            {compression, Compression}
-        ]),
-
-    LocalTreeState = couch_bt_engine_header:local_tree_state(Header),
-    {ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [
-            {split, fun ?MODULE:local_tree_split/1},
-            {join, fun ?MODULE:local_tree_join/2},
-            {compression, Compression}
-        ]),
-
-    PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header),
-    {ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [
-        {split, fun ?MODULE:purge_tree_split/1},
-        {join, fun ?MODULE:purge_tree_join/2},
-        {reduce, fun ?MODULE:purge_tree_reduce/2}
-    ]),
-
-    PurgeSeqTreeState = couch_bt_engine_header:purge_seq_tree_state(Header),
-    {ok, PurgeSeqTree} = couch_btree:open(PurgeSeqTreeState, Fd, [
-        {split, fun ?MODULE:purge_seq_tree_split/1},
-        {join, fun ?MODULE:purge_seq_tree_join/2},
-        {reduce, fun ?MODULE:purge_tree_reduce/2}
-    ]),
-
-    ok = couch_file:set_db_pid(Fd, self()),
-
-    St = #st{
-        filepath = FilePath,
-        fd = Fd,
-        fd_monitor = erlang:monitor(process, Fd),
-        header = Header,
-        needs_commit = false,
-        id_tree = IdTree,
-        seq_tree = SeqTree,
-        local_tree = LocalTree,
-        compression = Compression,
-        purge_tree = PurgeTree,
-        purge_seq_tree = PurgeSeqTree
-    },
-
-    % If this is a new database, we've just created a
-    % new UUID and default security object, which need
-    % to be written to disk.
-    case Header /= Header0 of
-        true ->
-            {ok, NewSt} = commit_data(St#st{needs_commit = true}),
-            NewSt;
-        false ->
-            St
-    end.
-
-
-update_header(St, Header) ->
-    couch_bt_engine_header:set(Header, [
-        {seq_tree_state, couch_btree:get_state(St#st.seq_tree)},
-        {id_tree_state, couch_btree:get_state(St#st.id_tree)},
-        {local_tree_state, couch_btree:get_state(St#st.local_tree)},
-        {purge_tree_state, couch_btree:get_state(St#st.purge_tree)},
-        {purge_seq_tree_state, couch_btree:get_state(St#st.purge_seq_tree)}
-    ]).
-
-
-increment_update_seq(#st{header = Header} = St) ->
-    UpdateSeq = couch_bt_engine_header:get(Header, update_seq),
-    St#st{
-        header = couch_bt_engine_header:set(Header, [
-            {update_seq, UpdateSeq + 1}
-        ])
-    }.
-
-
-set_default_security_object(Fd, Header, Compression, Options) ->
-    case couch_bt_engine_header:get(Header, security_ptr) of
-        Pointer when is_integer(Pointer) ->
-            Header;
-        _ ->
-            Default = couch_util:get_value(default_security_object, Options),
-            AppendOpts = [{compression, Compression}],
-            {ok, Ptr, _} = couch_file:append_term(Fd, Default, AppendOpts),
-            couch_bt_engine_header:set(Header, security_ptr, Ptr)
-    end.
-
-
-% This function is here, and not in couch_bt_engine_header,
-% because it requires modifying file contents.
-upgrade_purge_info(Fd, Header) ->
-    case couch_bt_engine_header:get(Header, purge_tree_state) of
-        nil ->
-            Header;
-        Ptr when is_tuple(Ptr) ->
-            Header;
-        PurgeSeq when is_integer(PurgeSeq)->
-            % Pointer to old purged ids/revs is in purge_seq_tree_state
-            Ptr = couch_bt_engine_header:get(Header, purge_seq_tree_state),
-
-            case Ptr of
-                nil ->
-                    PTS = couch_bt_engine_header:purge_tree_state(Header),
-                    PurgeTreeSt = case PTS of 0 -> nil; Else -> Else end,
-                    couch_bt_engine_header:set(Header, [
-                        {purge_tree_state, PurgeTreeSt}
-                    ]);
-                _ ->
-                    {ok, PurgedIdsRevs} = couch_file:pread_term(Fd, Ptr),
-
-                    {Infos, _} = lists:foldl(fun({Id, Revs}, {InfoAcc, PSeq}) ->
-                        Info = {PSeq, couch_uuids:random(), Id, Revs},
-                        {[Info | InfoAcc], PSeq + 1}
-                    end, {[], PurgeSeq}, PurgedIdsRevs),
-
-                    {ok, PurgeTree} = couch_btree:open(nil, Fd, [
-                        {split, fun ?MODULE:purge_tree_split/1},
-                        {join, fun ?MODULE:purge_tree_join/2},
-                        {reduce, fun ?MODULE:purge_tree_reduce/2}
-                    ]),
-                    {ok, PurgeTree2} = couch_btree:add(PurgeTree, Infos),
-                    PurgeTreeSt = couch_btree:get_state(PurgeTree2),
-
-                    {ok, PurgeSeqTree} = couch_btree:open(nil, Fd, [
-                        {split, fun ?MODULE:purge_seq_tree_split/1},
-                        {join, fun ?MODULE:purge_seq_tree_join/2},
-                        {reduce, fun ?MODULE:purge_tree_reduce/2}
-                    ]),
-                    {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, Infos),
-                    PurgeSeqTreeSt = couch_btree:get_state(PurgeSeqTree2),
-
-                    couch_bt_engine_header:set(Header, [
-                        {purge_tree_state, PurgeTreeSt},
-                        {purge_seq_tree_state, PurgeSeqTreeSt}
-                    ])
-            end
-    end.
-
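-% A worked example of the upgrade above (doc id and revs invented):
-% for an old header where purge_tree_state is the integer purge seq
-% 2 and the old pointer resolves to [{<<"doc1">>, Revs}], the fold
-% builds
-%
-%     {Infos, _} = lists:foldl(fun({Id, Revs}, {Acc, PSeq}) ->
-%         {[{PSeq, couch_uuids:random(), Id, Revs} | Acc], PSeq + 1}
-%     end, {[], 2}, [{<<"doc1">>, Revs}]),
-%     %% Infos =:= [{2, UUID, <<"doc1">>, Revs}]
-%
-% and each info is then added to both the purge_tree and the
-% purge_seq_tree.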
-
-init_set_props(Fd, Header, Options) ->
-    case couch_util:get_value(props, Options) of
-        undefined ->
-            Header;
-        InitialProps ->
-            Compression = couch_compress:get_compression_method(),
-            AppendOpts = [{compression, Compression}],
-            {ok, Ptr, _} = couch_file:append_term(Fd, InitialProps, AppendOpts),
-            couch_bt_engine_header:set(Header, props_ptr, Ptr)
-    end.
-
-
-delete_compaction_files(FilePath) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    DelOpts = [{context, compaction}],
-    delete_compaction_files(RootDir, FilePath, DelOpts).
-
-
-rev_tree(DiskTree) ->
-    couch_key_tree:map(fun
-        (_RevId, {Del, Ptr, Seq}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq
-            };
-        (_RevId, {Del, Ptr, Seq, Size}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq,
-                sizes = couch_db_updater:upgrade_sizes(Size)
-            };
-        (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
-            #leaf{
-                deleted = ?i2b(Del),
-                ptr = Ptr,
-                seq = Seq,
-                sizes = couch_db_updater:upgrade_sizes(Sizes),
-                atts = Atts
-            };
-        (_RevId, ?REV_MISSING) ->
-            ?REV_MISSING
-    end, DiskTree).
-
-
-disk_tree(RevTree) ->
-    couch_key_tree:map(fun
-        (_RevId, ?REV_MISSING) ->
-            ?REV_MISSING;
-        (_RevId, #leaf{} = Leaf) ->
-            #leaf{
-                deleted = Del,
-                ptr = Ptr,
-                seq = Seq,
-                sizes = Sizes,
-                atts = Atts
-            } = Leaf,
-            {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
-    end, RevTree).
-
-
-split_sizes(#size_info{}=SI) ->
-    {SI#size_info.active, SI#size_info.external}.
-
-
-join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
-    #size_info{active=Active, external=External}.
-
-
-reduce_sizes(nil, _) ->
-    nil;
-reduce_sizes(_, nil) ->
-    nil;
-reduce_sizes(#size_info{}=S1, #size_info{}=S2) ->
-    #size_info{
-        active = S1#size_info.active + S2#size_info.active,
-        external = S1#size_info.external + S2#size_info.external
-    };
-reduce_sizes(S1, S2) ->
-    US1 = couch_db_updater:upgrade_sizes(S1),
-    US2 = couch_db_updater:upgrade_sizes(S2),
-    reduce_sizes(US1, US2).
-
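-% For illustration (numbers invented), the rereduce above is a
-% field-wise sum:
-%
-%     reduce_sizes(#size_info{active = 10, external = 40},
-%                  #size_info{active = 5,  external = 20})
-%     %% => #size_info{active = 15, external = 60}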
-
-active_size(#st{} = St, #size_info{} = SI) ->
-    Trees = [
-        St#st.id_tree,
-        St#st.seq_tree,
-        St#st.local_tree,
-        St#st.purge_tree,
-        St#st.purge_seq_tree
-    ],
-    lists:foldl(fun(T, Acc) ->
-        case couch_btree:size(T) of
-            _ when Acc == null ->
-                null;
-            nil ->
-                null;
-            Size ->
-                Acc + Size
-        end
-    end, SI#size_info.active, Trees).
-
-
-fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
-    Fun = case lists:member(include_deleted, Options) of
-        true -> fun include_deleted/4;
-        false -> fun skip_deleted/4
-    end,
-    RedFun = case lists:member(include_reductions, Options) of
-        true -> fun include_reductions/4;
-        false -> fun drop_reductions/4
-    end,
-    InAcc = {RedFun, {UserFun, UserAcc}},
-    {ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options),
-    {_, {_, FinalUserAcc}} = OutAcc,
-    case lists:member(include_reductions, Options) of
-        true when Tree == St#st.id_tree ->
-            {ok, fold_docs_reduce_to_count(Reds), FinalUserAcc};
-        true when Tree == St#st.local_tree ->
-            {ok, 0, FinalUserAcc};
-        false ->
-            {ok, FinalUserAcc}
-    end.
-
-
-include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}}.
-
-
-% First element of the reductions is the total
-% number of undeleted documents.
-skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) ->
-    {skip, Acc};
-skip_deleted(visit, #full_doc_info{deleted = true}, _, Acc) ->
-    {ok, Acc};
-skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}}.
-
-
-include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc),
-    {Go, {UserFun, NewUserAcc}};
-include_reductions(_, _, _, Acc) ->
-    {ok, Acc}.
-
-
-drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) ->
-    {Go, NewUserAcc} = UserFun(FDI, UserAcc),
-    {Go, {UserFun, NewUserAcc}};
-drop_reductions(_, _, _, Acc) ->
-    {ok, Acc}.
-
-
-fold_docs_reduce_to_count(Reds) ->
-    RedFun = fun id_tree_reduce/2,
-    FinalRed = couch_btree:final_reduce(RedFun, Reds),
-    element(1, FinalRed).
-
-
-finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
-    #st{
-        filepath = FilePath,
-        local_tree = OldLocal
-    } = OldSt,
-    #st{
-        filepath = CompactDataPath,
-        header = Header,
-        local_tree = NewLocal1
-    } = NewSt1,
-
-    % suck up all the local docs into memory and write them to the new db
-    LoadFun = fun(Value, _Offset, Acc) ->
-        {ok, [Value | Acc]}
-    end,
-    {ok, _, LocalDocs} = couch_btree:foldl(OldLocal, LoadFun, []),
-    {ok, NewLocal2} = couch_btree:add(NewLocal1, LocalDocs),
-
-    {ok, NewSt2} = commit_data(NewSt1#st{
-        header = couch_bt_engine_header:set(Header, [
-            {compacted_seq, get_update_seq(OldSt)},
-            {revs_limit, get_revs_limit(OldSt)},
-            {purge_infos_limit, get_purge_infos_limit(OldSt)}
-        ]),
-        local_tree = NewLocal2
-    }),
-
-    % Rename our *.compact.data file to *.compact so that if we
-    % die between deleting the old file and renaming *.compact
-    % we can recover correctly.
-    ok = file:rename(CompactDataPath, FilePath ++ ".compact"),
-
-    % Remove the uncompacted database file
-    RootDir = config:get("couchdb", "database_dir", "."),
-    couch_file:delete(RootDir, FilePath),
-
-    % Move our compacted file into its final location
-    ok = file:rename(FilePath ++ ".compact", FilePath),
-
-    % Delete the old meta compaction file after promoting
-    % the compaction file.
-    couch_file:delete(RootDir, FilePath ++ ".compact.meta"),
-
-    % We're finished with our old state
-    decref(OldSt),
-
-    % And return our finished new state
-    {ok, NewSt2#st{
-        filepath = FilePath
-    }, undefined}.
-
-
-is_file(Path) ->
-    case file:read_file_info(Path, [raw]) of
-        {ok, #file_info{type = regular}} -> true;
-        {ok, #file_info{type = directory}} -> true;
-        _ -> false
-    end.
diff --git a/src/couch/src/couch_bt_engine.hrl b/src/couch/src/couch_bt_engine.hrl
deleted file mode 100644
index e3c1d49..0000000
--- a/src/couch/src/couch_bt_engine.hrl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(st, {
-    filepath,
-    fd,
-    fd_monitor,
-    % deprecated but keeping it here to avoid altering the record size
-    fsync_options_deprecated,
-    header,
-    needs_commit,
-    id_tree,
-    seq_tree,
-    local_tree,
-    compression,
-    purge_tree,
-    purge_seq_tree
-}).
diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl
deleted file mode 100644
index 0b3fb22..0000000
--- a/src/couch/src/couch_bt_engine_compactor.erl
+++ /dev/null
@@ -1,590 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor).
-
-
--export([
-    start/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
--record(comp_header, {
-    db_header,
-    meta_state
-}).
-
--record(merge_st, {
-    id_tree,
-    seq_tree,
-    curr,
-    rem_seqs,
-    infos
-}).
-
-
-start(#st{} = St, DbName, Options, Parent) ->
-    erlang:put(io_priority, {db_compact, DbName}),
-    #st{
-        filepath = FilePath,
-        header = Header
-    } = St,
-    couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]),
-
-    couch_db_engine:trigger_on_compact(DbName),
-
-    {ok, NewSt, DName, DFd, MFd, Retry} =
-            open_compaction_files(Header, FilePath, Options),
-    erlang:monitor(process, MFd),
-
-    % This is a bit worrisome. init_db/4 will monitor the data fd
-    % but it doesn't know about the meta fd. For now I'll maintain
-    % that the data fd is the old normal fd and meta fd is special
-    % and hope everything works out for the best.
-    unlink(DFd),
-
-    NewSt1 = copy_purge_info(DbName, St, NewSt, Retry),
-    NewSt2 = copy_compact(DbName, St, NewSt1, Retry),
-    NewSt3 = sort_meta_data(NewSt2),
-    NewSt4 = commit_compaction_data(NewSt3),
-    NewSt5 = copy_meta_data(NewSt4),
-    {ok, NewSt6} = couch_bt_engine:commit_data(NewSt5),
-    ok = couch_bt_engine:decref(NewSt6),
-    ok = couch_file:close(MFd),
-
-    % Done
-    gen_server:cast(Parent, {compact_done, couch_bt_engine, DName}).
-
-
-open_compaction_files(SrcHdr, DbFilePath, Options) ->
-    DataFile = DbFilePath ++ ".compact.data",
-    MetaFile = DbFilePath ++ ".compact.meta",
-    {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
-    {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
-    DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr),
-    case {DataHdr, MetaHdr} of
-        {#comp_header{}=A, #comp_header{}=A} ->
-            DbHeader = A#comp_header.db_header,
-            St0 = couch_bt_engine:init_state(
-                    DataFile, DataFd, DbHeader, Options),
-            St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_state),
-            {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
-        _ when DataHdrIsDbHdr ->
-            Header = couch_bt_engine_header:from(SrcHdr),
-            ok = reset_compaction_file(MetaFd, Header),
-            St0 = couch_bt_engine:init_state(
-                    DataFile, DataFd, DataHdr, Options),
-            St1 = bind_emsort(St0, MetaFd, nil),
-            {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
-        _ ->
-            Header = couch_bt_engine_header:from(SrcHdr),
-            ok = reset_compaction_file(DataFd, Header),
-            ok = reset_compaction_file(MetaFd, Header),
-            St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
-            St1 = bind_emsort(St0, MetaFd, nil),
-            {ok, St1, DataFile, DataFd, MetaFd, nil}
-    end.
-
-
-copy_purge_info(DbName, OldSt, NewSt, Retry) ->
-    MinPurgeSeq = couch_util:with_db(DbName, fun(Db) ->
-        couch_db:get_minimum_purge_seq(Db)
-    end),
-    OldPSTree = OldSt#st.purge_seq_tree,
-    StartSeq = couch_bt_engine:get_purge_seq(NewSt) + 1,
-    BufferSize = config:get_integer(
-            "database_compaction", "doc_buffer_size", 524288),
-    CheckpointAfter = config:get(
-            "database_compaction", "checkpoint_after", BufferSize * 10),
-
-    EnumFun = fun(Info, _Reds, {StAcc0, InfosAcc, InfosSize, CopiedSize}) ->
-        NewInfosSize = InfosSize + ?term_size(Info),
-        if NewInfosSize >= BufferSize ->
-            StAcc1 = copy_purge_infos(
-                    OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry),
-            NewCopiedSize = CopiedSize + NewInfosSize,
-            if NewCopiedSize >= CheckpointAfter ->
-                StAcc2 = commit_compaction_data(StAcc1),
-                {ok, {StAcc2, [], 0, 0}};
-            true ->
-                {ok, {StAcc1, [], 0, NewCopiedSize}}
-            end;
-        true ->
-            NewInfosAcc = [Info | InfosAcc],
-            {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}}
-        end
-    end,
-
-    InitAcc = {NewSt, [], 0, 0},
-    Opts = [{start_key, StartSeq}],
-    {ok, _, FinalAcc} = couch_btree:fold(OldPSTree, EnumFun, InitAcc, Opts),
-    {NewStAcc, Infos, _, _} = FinalAcc,
-    copy_purge_infos(OldSt, NewStAcc, Infos, MinPurgeSeq, Retry).
-
-
-copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
-    #st{
-        id_tree = OldIdTree
-    } = OldSt,
-
-    % Re-bind our id_tree to the backing btree
-    NewIdTreeState = couch_bt_engine_header:id_tree_state(NewSt0#st.header),
-    MetaFd = couch_emsort:get_fd(NewSt0#st.id_tree),
-    MetaState = couch_emsort:get_state(NewSt0#st.id_tree),
-    NewSt1 = bind_id_tree(NewSt0, NewSt0#st.fd, NewIdTreeState),
-
-    #st{
-        id_tree = NewIdTree0,
-        seq_tree = NewSeqTree0,
-        purge_tree = NewPurgeTree0,
-        purge_seq_tree = NewPurgeSeqTree0
-    } = NewSt1,
-
-    % Copy over the purge infos
-    InfosToAdd = lists:filter(fun({PSeq, _, _, _}) ->
-        PSeq > MinPurgeSeq
-    end, Infos),
-    {ok, NewPurgeTree1} = couch_btree:add(NewPurgeTree0, InfosToAdd),
-    {ok, NewPurgeSeqTree1} = couch_btree:add(NewPurgeSeqTree0, InfosToAdd),
-
-    NewSt2 = NewSt1#st{
-        purge_tree = NewPurgeTree1,
-        purge_seq_tree = NewPurgeSeqTree1
-    },
-
-    % If we're performing a retry compaction we have to check if
-    % any of the referenced docs have been completely purged
-    % from the database. Any doc that has been completely purged
-    % must then be removed from our partially compacted database.
-    NewSt3 = if Retry == nil -> NewSt2; true ->
-        AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos],
-        UniqDocIds = lists:usort(AllDocIds),
-        OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds),
-        OldZipped = lists:zip(UniqDocIds, OldIdResults),
-
-        % The list of non-existent docs in the database being compacted
-        MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped],
-
-        % Remove any of those that still exist in the partially compacted database
-        NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds),
-        ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}],
-
-        {RemIds, RemSeqs} = lists:unzip(lists:map(fun(FDI) ->
-            #full_doc_info{
-                id = Id,
-                update_seq = Seq
-            } = FDI,
-            {Id, Seq}
-        end, ToRemove)),
-
-        {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds),
-        {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs),
-
-        NewSt2#st{
-            id_tree = NewIdTree1,
-            seq_tree = NewSeqTree1
-        }
-    end,
-
-    Header = couch_bt_engine:update_header(NewSt3, NewSt3#st.header),
-    NewSt4 = NewSt3#st{
-        header = Header
-    },
-    bind_emsort(NewSt4, MetaFd, MetaState).
-
-
-copy_compact(DbName, St, NewSt0, Retry) ->
-    Compression = couch_compress:get_compression_method(),
-    NewSt = NewSt0#st{compression = Compression},
-    NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0),
-    TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq),
-    BufferSize = list_to_integer(
-        config:get("database_compaction", "doc_buffer_size", "524288")),
-    CheckpointAfter = couch_util:to_integer(
-        config:get("database_compaction", "checkpoint_after",
-            BufferSize * 10)),
-
-    EnumBySeqFun =
-    fun(DocInfo, _Offset,
-            {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
-
-        Seq = case DocInfo of
-            #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
-            #doc_info{} -> DocInfo#doc_info.high_seq
-        end,
-
-        AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
-        if AccUncopiedSize2 >= BufferSize ->
-            NewSt2 = copy_docs(
-                St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry),
-            AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
-            if AccCopiedSize2 >= CheckpointAfter ->
-                {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
-                CommNewSt3 = commit_compaction_data(NewSt3),
-                {ok, {CommNewSt3, [], 0, 0}};
-            true ->
-                {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
-                {ok, {NewSt3, [], 0, AccCopiedSize2}}
-            end;
-        true ->
-            {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2,
-                AccCopiedSize}}
-        end
-    end,
-
-    TaskProps0 = [
-        {type, database_compaction},
-        {database, DbName},
-        {progress, 0},
-        {changes_done, 0},
-        {total_changes, TotalChanges}
-    ],
-    case (Retry =/= nil) and couch_task_status:is_task_added() of
-    true ->
-        couch_task_status:update([
-            {retry, true},
-            {progress, 0},
-            {changes_done, 0},
-            {total_changes, TotalChanges}
-        ]);
-    false ->
-        couch_task_status:add_task(TaskProps0),
-        couch_task_status:set_update_frequency(500)
-    end,
-
-    {ok, _, {NewSt2, Uncopied, _, _}} =
-        couch_btree:foldl(St#st.seq_tree, EnumBySeqFun,
-            {NewSt, [], 0, 0},
-            [{start_key, NewUpdateSeq + 1}]),
-
-    NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry),
-
-    % Copy the security information over
-    SecProps = couch_bt_engine:get_security(St),
-    {ok, NewSt4} = couch_bt_engine:copy_security(NewSt3, SecProps),
-
-    % Copy general properties over
-    Props = couch_bt_engine:get_props(St),
-    {ok, NewSt5} = couch_bt_engine:set_props(NewSt4, Props),
-
-    FinalUpdateSeq = couch_bt_engine:get_update_seq(St),
-    {ok, NewSt6} = couch_bt_engine:set_update_seq(NewSt5, FinalUpdateSeq),
-    commit_compaction_data(NewSt6).
-
-
-copy_docs(St, #st{} = NewSt, MixedInfos, Retry) ->
-    DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
-    LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds),
-    % COUCHDB-968, make sure we prune duplicates during compaction
-    NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
-        A =< B
-    end, merge_lookups(MixedInfos, LookupResults)),
-
-    NewInfos1 = lists:map(fun(Info) ->
-        {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun
-            ({RevPos, RevId}, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) ->
-                {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
-                #size_info{external = OldExternalSize} = Leaf#leaf.sizes,
-                ExternalSize = case OldExternalSize of
-                    0 when is_binary(Body) ->
-                        couch_compress:uncompressed_size(Body);
-                    0 ->
-                        couch_ejson_size:encoded_size(Body);
-                    N -> N
-                end,
-                Doc0 = #doc{
-                    id = Info#full_doc_info.id,
-                    revs = {RevPos, [RevId]},
-                    deleted = Leaf#leaf.deleted,
-                    body = Body,
-                    atts = AttInfos
-                },
-                Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
-                {ok, Doc2, ActiveSize} =
-                        couch_bt_engine:write_doc_body(NewSt, Doc1),
-                AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos],
-                NewLeaf = Leaf#leaf{
-                    ptr = Doc2#doc.body,
-                    sizes = #size_info{
-                        active = ActiveSize,
-                        external = ExternalSize
-                    },
-                    atts = AttSizes
-                },
-                {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
-            (_Rev, _Leaf, branch, SizesAcc) ->
-                {?REV_MISSING, SizesAcc}
-        end, {0, 0, []}, Info#full_doc_info.rev_tree),
-        {FinalAS, FinalES, FinalAtts} = FinalAcc,
-        TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
-        NewActiveSize = FinalAS + TotalAttSize,
-        NewExternalSize = FinalES + TotalAttSize,
-        Info#full_doc_info{
-            rev_tree = NewRevTree,
-            sizes = #size_info{
-                active = NewActiveSize,
-                external = NewExternalSize
-            }
-        }
-    end, NewInfos0),
-
-    Limit = couch_bt_engine:get_revs_limit(St),
-    NewInfos = lists:map(fun(FDI) ->
-        FDI#full_doc_info{
-            rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
-        }
-    end, NewInfos1),
-
-    RemoveSeqs =
-    case Retry of
-    nil ->
-        [];
-    OldDocIdTree ->
-        % Compaction is being rerun to catch up to writes during the
-        % first pass. This means we may have docs that already exist
-        % in the seq_tree in the .data file. Here we look up any old
-        % update_seqs so that they can be removed.
-        Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
-        Existing = couch_btree:lookup(OldDocIdTree, Ids),
-        [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
-    end,
-
-    {ok, SeqTree} = couch_btree:add_remove(
-            NewSt#st.seq_tree, NewInfos, RemoveSeqs),
-
-    FDIKVs = lists:map(fun(#full_doc_info{id=Id, update_seq=Seq}=FDI) ->
-        {{Id, Seq}, FDI}
-    end, NewInfos),
-    {ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, FDIKVs),
-    update_compact_task(length(NewInfos)),
-    NewSt#st{id_tree=IdEms, seq_tree=SeqTree}.
-
-
-copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) ->
-    {ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp),
-    BinInfos = case BinInfos0 of
-    _ when is_binary(BinInfos0) ->
-        couch_compress:decompress(BinInfos0);
-    _ when is_list(BinInfos0) ->
-        % pre 1.2 file format
-        BinInfos0
-    end,
-    % copy the bin values
-    NewBinInfos = lists:map(
-        fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
-            % 010 UPGRADE CODE
-            {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
-            {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
-            ok = couch_stream:copy(SrcStream, DstStream),
-            {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
-                couch_stream:close(DstStream),
-            {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-            couch_util:check_md5(ExpectedMd5, ActualMd5),
-            {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-        ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
-            {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
-            {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
-            ok = couch_stream:copy(SrcStream, DstStream),
-            {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
-                couch_stream:close(DstStream),
-            {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-            couch_util:check_md5(ExpectedMd5, ActualMd5),
-            Enc = case Enc1 of
-            true ->
-                % 0110 UPGRADE CODE
-                gzip;
-            false ->
-                % 0110 UPGRADE CODE
-                identity;
-            _ ->
-                Enc1
-            end,
-            {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
-        end, BinInfos),
-    {BodyData, NewBinInfos}.
-
-
-sort_meta_data(St0) ->
-    {ok, Ems} = couch_emsort:merge(St0#st.id_tree),
-    St0#st{id_tree=Ems}.
-
-
-copy_meta_data(#st{} = St) ->
-    #st{
-        fd = Fd,
-        header = Header,
-        id_tree = Src
-    } = St,
-    DstState = couch_bt_engine_header:id_tree_state(Header),
-    {ok, IdTree0} = couch_btree:open(DstState, Fd, [
-        {split, fun couch_bt_engine:id_tree_split/1},
-        {join, fun couch_bt_engine:id_tree_join/2},
-        {reduce, fun couch_bt_engine:id_tree_reduce/2}
-    ]),
-    {ok, Iter} = couch_emsort:iter(Src),
-    Acc0 = #merge_st{
-        id_tree=IdTree0,
-        seq_tree=St#st.seq_tree,
-        rem_seqs=[],
-        infos=[]
-    },
-    Acc = merge_docids(Iter, Acc0),
-    {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Acc#merge_st.infos),
-    {ok, SeqTree} = couch_btree:add_remove(
-        Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
-    ),
-    St#st{id_tree=IdTree, seq_tree=SeqTree}.
-
-
-open_compaction_file(FilePath) ->
-    case couch_file:open(FilePath, [nologifmissing]) of
-        {ok, Fd} ->
-            case couch_file:read_header(Fd) of
-                {ok, Header} -> {ok, Fd, Header};
-                no_valid_header -> {ok, Fd, nil}
-            end;
-        {error, enoent} ->
-            {ok, Fd} = couch_file:open(FilePath, [create]),
-            {ok, Fd, nil}
-    end.
-
-
-reset_compaction_file(Fd, Header) ->
-    ok = couch_file:truncate(Fd, 0),
-    ok = couch_file:write_header(Fd, Header).
-
-
-commit_compaction_data(#st{}=St) ->
-    % Compaction needs to write headers to both the data file
-    % and the meta file so if we need to restart we can pick
-    % back up from where we left off.
-    commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)),
-    commit_compaction_data(St, St#st.fd).
-
-
-commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
-    DataState = couch_bt_engine_header:id_tree_state(OldHeader),
-    MetaFd = couch_emsort:get_fd(St0#st.id_tree),
-    MetaState = couch_emsort:get_state(St0#st.id_tree),
-    St1 = bind_id_tree(St0, St0#st.fd, DataState),
-    Header = couch_bt_engine:update_header(St1, St1#st.header),
-    CompHeader = #comp_header{
-        db_header = Header,
-        meta_state = MetaState
-    },
-    ok = couch_file:sync(Fd),
-    ok = couch_file:write_header(Fd, CompHeader),
-    St2 = St1#st{
-        header = Header
-    },
-    bind_emsort(St2, MetaFd, MetaState).
-
-
-bind_emsort(St, Fd, nil) ->
-    {ok, Ems} = couch_emsort:open(Fd),
-    St#st{id_tree=Ems};
-bind_emsort(St, Fd, State) ->
-    {ok, Ems} = couch_emsort:open(Fd, [{root, State}]),
-    St#st{id_tree=Ems}.
-
-
-bind_id_tree(St, Fd, State) ->
-    {ok, IdBtree} = couch_btree:open(State, Fd, [
-        {split, fun couch_bt_engine:id_tree_split/1},
-        {join, fun couch_bt_engine:id_tree_join/2},
-        {reduce, fun couch_bt_engine:id_tree_reduce/2}
-    ]),
-    St#st{id_tree=IdBtree}.
-
-
-merge_lookups(Infos, []) ->
-    Infos;
-merge_lookups([], _) ->
-    [];
-merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
-    % Assert we've matched our lookups
-    if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
-        erlang:error({mismatched_doc_infos, DI#doc_info.id})
-    end,
-    [FDI | merge_lookups(RestInfos, RestLookups)];
-merge_lookups([FDI | RestInfos], Lookups) ->
-    [FDI | merge_lookups(RestInfos, Lookups)].
-
-
-merge_docids(Iter, #merge_st{infos=Infos}=Acc) when length(Infos) > 1000 ->
-    #merge_st{
-        id_tree=IdTree0,
-        seq_tree=SeqTree0,
-        rem_seqs=RemSeqs
-    } = Acc,
-    {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
-    {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
-    Acc1 = Acc#merge_st{
-        id_tree=IdTree1,
-        seq_tree=SeqTree1,
-        rem_seqs=[],
-        infos=[]
-    },
-    merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
-    case next_info(Iter, Curr, []) of
-        {NextIter, NewCurr, FDI, Seqs} ->
-            Acc1 = Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = NewCurr
-            },
-            merge_docids(NextIter, Acc1);
-        {finished, FDI, Seqs} ->
-            Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = undefined
-            };
-        empty ->
-            Acc
-    end.
-
-
-next_info(Iter, undefined, []) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, Seq}, FDI}, NextIter} ->
-            next_info(NextIter, {Id, Seq, FDI}, []);
-        finished ->
-            empty
-    end;
-next_info(Iter, {Id, Seq, FDI}, Seqs) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, NSeq}, NFDI}, NextIter} ->
-            next_info(NextIter, {Id, NSeq, NFDI}, [Seq | Seqs]);
-        {ok, {{NId, NSeq}, NFDI}, NextIter} ->
-            {NextIter, {NId, NSeq, NFDI}, FDI, Seqs};
-        finished ->
-            {finished, FDI, Seqs}
-    end.
-
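-% To make the grouping concrete (ids, seqs and FDIs invented): for a
-% sorted emsort stream
-%
-%     {{<<"a">>, 1}, FDI1}, {{<<"a">>, 3}, FDI2}, {{<<"b">>, 2}, FDI3}
-%
-% next_info/3 first returns {NextIter, {<<"b">>, 2, FDI3}, FDI2, [1]}:
-% FDI2 is the surviving entry for <<"a">> and the superseded update
-% seq 1 is queued for removal from the seq_tree.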
-
-update_compact_task(NumChanges) ->
-    [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
-    Changes2 = Changes + NumChanges,
-    Progress = case Total of
-    0 ->
-        0;
-    _ ->
-        (Changes2 * 100) div Total
-    end,
-    couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
deleted file mode 100644
index 3f9f518..0000000
--- a/src/couch/src/couch_bt_engine_header.erl
+++ /dev/null
@@ -1,451 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_header).
-
-
--export([
-    new/0,
-    from/1,
-    is_header/1,
-    upgrade/1,
-    get/2,
-    get/3,
-    set/2,
-    set/3
-]).
-
--export([
-    disk_version/1,
-    latest_disk_version/0,
-    update_seq/1,
-    id_tree_state/1,
-    seq_tree_state/1,
-    latest/1,
-    local_tree_state/1,
-    purge_tree_state/1,
-    purge_seq_tree_state/1,
-    purge_infos_limit/1,
-    security_ptr/1,
-    revs_limit/1,
-    uuid/1,
-    epochs/1,
-    compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 8).
-
--record(db_header, {
-    disk_version = ?LATEST_DISK_VERSION,
-    update_seq = 0,
-    unused = 0,
-    id_tree_state = nil,
-    seq_tree_state = nil,
-    local_tree_state = nil,
-    purge_tree_state = nil,
-    purge_seq_tree_state = nil, % purge seq tree: purge_seq -> {uuid, docid, revs}
-    security_ptr = nil,
-    revs_limit = 1000,
-    uuid,
-    epochs,
-    compacted_seq,
-    purge_infos_limit = 1000,
-    props_ptr
-}).
-
-
--define(PARTITION_DISK_VERSION, 8).
-
-
-new() ->
-    #db_header{
-        uuid = couch_uuids:random(),
-        epochs = [{node(), 0}]
-    }.
-
-
-from(Header0) ->
-    Header = upgrade(Header0),
-    #db_header{
-        uuid = Header#db_header.uuid,
-        epochs = Header#db_header.epochs,
-        compacted_seq = Header#db_header.compacted_seq
-    }.
-
-
-is_header(Header) ->
-    try
-        upgrade(Header),
-        true
-    catch _:_ ->
-        false
-    end.
-
-
-upgrade(Header) ->
-    Funs = [
-        fun upgrade_tuple/1,
-        fun upgrade_disk_version/1,
-        fun upgrade_uuid/1,
-        fun upgrade_epochs/1,
-        fun upgrade_compacted_seq/1
-    ],
-    lists:foldl(fun(F, HdrAcc) ->
-        F(HdrAcc)
-    end, Header, Funs).
-
-
-get(Header, Key) ->
-    ?MODULE:get(Header, Key, undefined).
-
-
-get(Header, Key, Default) ->
-    get_field(Header, Key, Default).
-
-
-set(Header, Key, Value) ->
-    ?MODULE:set(Header, [{Key, Value}]).
-
-
-set(Header0, Fields) ->
-    % A subtlety here: if a database was open during the release
-    % upgrade that introduced uuids and epochs, then this dynamic
-    % upgrade also assigns it a uuid and epoch.
-    Header = upgrade(Header0),
-    lists:foldl(fun({Field, Value}, HdrAcc) ->
-        set_field(HdrAcc, Field, Value)
-    end, Header, Fields).
-
-
-disk_version(Header) ->
-    get_field(Header, disk_version).
-
-
-latest_disk_version() ->
-    ?LATEST_DISK_VERSION.
-
-
-update_seq(Header) ->
-    get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
-    get_field(Header, id_tree_state).
-
-
-seq_tree_state(Header) ->
-    get_field(Header, seq_tree_state).
-
-
-local_tree_state(Header) ->
-    get_field(Header, local_tree_state).
-
-
-purge_tree_state(Header) ->
-    get_field(Header, purge_tree_state).
-
-
-purge_seq_tree_state(Header) ->
-    get_field(Header, purge_seq_tree_state).
-
-
-security_ptr(Header) ->
-    get_field(Header, security_ptr).
-
-
-revs_limit(Header) ->
-    get_field(Header, revs_limit).
-
-
-uuid(Header) ->
-    get_field(Header, uuid).
-
-
-epochs(Header) ->
-    get_field(Header, epochs).
-
-
-compacted_seq(Header) ->
-    get_field(Header, compacted_seq).
-
-
-purge_infos_limit(Header) ->
-    get_field(Header, purge_infos_limit).
-
-
-get_field(Header, Field) ->
-    get_field(Header, Field, undefined).
-
-
-get_field(Header, Field, Default) ->
-    Idx = index(Field),
-    case Idx > tuple_size(Header) of
-        true -> Default;
-        false -> element(Idx, Header)
-    end.
-
-
-set_field(Header, Field, Value) ->
-    setelement(index(Field), Header, Value).
-
-
-index(Field) ->
-    couch_util:get_value(Field, indexes()).
-
-
-indexes() ->
-    Fields = record_info(fields, db_header),
-    Indexes = lists:seq(2, record_info(size, db_header)),
-    lists:zip(Fields, Indexes).
-
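-% For example, update_seq is the second field of #db_header{}, so
-% index(update_seq) =:= 3 and get_field/3 reads element(3, Header).
-% Reading a field past the end of a shorter (older) header tuple
-% falls back to the supplied default (OldHeader invented):
-%
-%     get_field(OldHeader, props_ptr, undefined)  %% => undefined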
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
-    Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
-    NewSize = record_info(size, db_header),
-    if tuple_size(Old) < NewSize -> ok; true ->
-        erlang:error({invalid_header_size, Old})
-    end,
-    {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
-        {Idx+1, setelement(Idx, Hdr, Val)}
-    end, {1, #db_header{}}, tuple_to_list(Old)),
-    if is_record(New, db_header) -> ok; true ->
-        erlang:error({invalid_header_extension, {Old, New}})
-    end,
-    New.
-
--define(OLD_DISK_VERSION_ERROR,
-    "Database files from versions smaller than 0.10.0 are no longer supported").
-
-upgrade_disk_version(#db_header{}=Header) ->
-    case element(2, Header) of
-        1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
-        5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2
-        6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge
-        7 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre partitioned dbs
-        ?LATEST_DISK_VERSION -> Header;
-        _ ->
-            Reason = "Incorrect disk header version",
-            throw({database_disk_version_error, Reason})
-    end.
-
-
-upgrade_uuid(#db_header{}=Header) ->
-    case Header#db_header.uuid of
-        undefined ->
-            % Upgrading this old db file to a newer
-            % on disk format that includes a UUID.
-            Header#db_header{uuid=couch_uuids:random()};
-        _ ->
-            Header
-    end.
-
-
-upgrade_epochs(#db_header{}=Header) ->
-    NewEpochs = case Header#db_header.epochs of
-        undefined ->
-            % This node is taking over ownership of a shard with
-            % an old version of couch file. Before epochs there
-            % was always an implicit assumption that a file was
-            % owned since eternity by the node it was on. This
-            % just codifies that assumption.
-            [{node(), 0}];
-        [{Node, _} | _] = Epochs0 when Node == node() ->
-            % Current node is the current owner of this db
-            Epochs0;
-        Epochs1 ->
-            % This node is taking over ownership of this db
-            % and marking the update sequence where it happened.
-            [{node(), Header#db_header.update_seq} | Epochs1]
-    end,
-    % It's possible for a node to open a db and claim
-    % ownership but never make a write to the db. This
-    % removes nodes that claimed ownership but never
-    % changed the database.
-    DedupedEpochs = remove_dup_epochs(NewEpochs),
-    Header#db_header{epochs=DedupedEpochs}.
-
-
-% This is slightly relying on the update_seqs being sorted
-% in epochs due to how we only ever push things onto the
-% front. Although if we ever had a case where the update_seq
-% is not monotonically increasing I don't know that we'd
-% want to remove dupes (by calling a sort on the input to this
-% function). So for now we don't sort but are relying on the
-% idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
-    % Seqs match, keep the most recent owner
-    [{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
-    % Seqs don't match.
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
-    % Seqs match, keep the most recent owner
-    remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
-    % Seqs don't match, recurse to check others
-    [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
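-% A worked example (node names invented): a takeover recorded at the
-% same update_seq as the previous owner's last entry means the old
-% owner never wrote anything, so only the new owner is kept:
-%
-%     remove_dup_epochs([{'n2@h', 10}, {'n1@h', 10}, {'n1@h', 0}])
-%     %% => [{'n2@h', 10}, {'n1@h', 0}]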
-
-upgrade_compacted_seq(#db_header{}=Header) ->
-    case Header#db_header.compacted_seq of
-        undefined ->
-            Header#db_header{compacted_seq=0};
-        _ ->
-            Header
-    end.
-
-latest(?LATEST_DISK_VERSION) ->
-    true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
-    false;
-latest(_Else) ->
-    undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
-    {
-        db_header, % record name
-        Vsn, % disk version
-        100, % update_seq
-        0, % unused
-        foo, % id_tree_state
-        bar, % seq_tree_state
-        bam, % local_tree_state
-        flam, % was purge_seq - now purge_tree_state
-        baz, % was purged_docs - now purge_seq_tree_state
-        bang, % security_ptr
-        999 % revs_limit
-    }.
-
-
--ifdef(run_broken_tests).
-
-upgrade_v3_test() ->
-    Vsn3Header = mk_header(3),
-    NewHeader = upgrade_tuple(Vsn3Header),
-
-    % Tuple upgrades don't change
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(3, disk_version(NewHeader)),
-    ?assertEqual(100, update_seq(NewHeader)),
-    ?assertEqual(foo, id_tree_state(NewHeader)),
-    ?assertEqual(bar, seq_tree_state(NewHeader)),
-    ?assertEqual(bam, local_tree_state(NewHeader)),
-    ?assertEqual(flam, purge_tree_state(NewHeader)),
-    ?assertEqual(baz, purge_seq_tree_state(NewHeader)),
-    ?assertEqual(bang, security_ptr(NewHeader)),
-    ?assertEqual(999, revs_limit(NewHeader)),
-    ?assertEqual(undefined, uuid(NewHeader)),
-    ?assertEqual(undefined, epochs(NewHeader)),
-
-    % Security ptr isn't changed until upgrade_disk_version/1
-    NewNewHeader = upgrade_disk_version(NewHeader),
-    ?assert(is_record(NewNewHeader, db_header)),
-    ?assertEqual(nil, security_ptr(NewNewHeader)),
-
-    % Assert upgrade works on really old headers
-    NewestHeader = upgrade(Vsn3Header),
-    ?assertMatch(<<_:32/binary>>, uuid(NewestHeader)),
-    ?assertEqual([{node(), 0}], epochs(NewestHeader)).
-
--endif.
-
-upgrade_v5_to_v8_test() ->
-    Vsn5Header = mk_header(5),
-    NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(8, disk_version(NewHeader)),
-
-    % Security ptr isn't changed for v5 headers
-    ?assertEqual(bang, security_ptr(NewHeader)).
-
-
-upgrade_uuid_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a new UUID
-    NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
-    ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
-    % Headers with a UUID don't have their UUID changed
-    NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
-    ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
-    % Derived empty headers maintain the same UUID
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-
-upgrade_epochs_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a default epochs set
-    NewHeader = upgrade(Vsn5Header),
-    ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
-    % Fake an old entry in epochs
-    FakeFields = [
-        {update_seq, 20},
-        {epochs, [{'someothernode@someotherhost', 0}]}
-    ],
-    NotOwnedHeader = set(NewHeader, FakeFields),
-
-    OwnedEpochs = [
-        {node(), 20},
-        {'someothernode@someotherhost', 0}
-    ],
-
-    % Upgrading a header not owned by the local node updates
-    % the epochs appropriately.
-    NowOwnedHeader = upgrade(NotOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
-    % Headers with epochs stay the same after upgrades
-    NewNewHeader = upgrade(NowOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
-    % Getting a reset header maintains the epoch data
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-
-get_uuid_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, uuid(Vsn5Header)).
-
-
-get_epochs_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, epochs(Vsn5Header)).
-
-
--endif.
diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl
deleted file mode 100644
index 431894a..0000000
--- a/src/couch/src/couch_bt_engine_stream.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_stream).
-
--export([
-    foldl/3,
-    seek/2,
-    write/2,
-    finalize/1,
-    to_disk_term/1
-]).
-
-
-foldl({_Fd, []}, _Fun, Acc) ->
-    Acc;
-
-foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) ->
-    foldl({Fd, [Pos | Rest]}, Fun, Acc);
-
-foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) ->
-    % We're processing the first bit of data
-    % after we did a seek for a range fold.
-    foldl({Fd, Rest}, Fun, Fun(Bin, Acc));
-
-foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) ->
-    {ok, Bin} = couch_file:pread_binary(Fd, Pos),
-    foldl({Fd, Rest}, Fun, Fun(Bin, Acc)).
-
-
-seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
-    case Length =< Offset of
-        true ->
-            seek({Fd, Rest}, Offset - Length);
-        false ->
-            seek({Fd, [Pos | Rest]}, Offset)
-    end;
-
-seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
-    {ok, Bin} = couch_file:pread_binary(Fd, Pos),
-    case iolist_size(Bin) =< Offset of
-        true ->
-            seek({Fd, Rest}, Offset - byte_size(Bin));
-        false ->
-            <<_:Offset/binary, Tail/binary>> = Bin,
-            {ok, {Fd, [Tail | Rest]}}
-    end.
-
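-% For illustration (positions and sizes invented): seeking to byte
-% offset 15 in a stream of two chunks of 10 and 20 bytes skips the
-% first {Pos, Length} pair entirely and splits the second chunk:
-%
-%     seek({Fd, [{Pos1, 10}, {Pos2, 20}]}, 15)
-%     %% => {ok, {Fd, [Tail]}} where Tail is the last
-%     %% 15 bytes of the second chunk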
-
-write({Fd, Written}, Data) when is_pid(Fd) ->
-    {ok, Pos, _} = couch_file:append_binary(Fd, Data),
-    {ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}.
-
-
-finalize({Fd, Written}) ->
-    {ok, {Fd, lists:reverse(Written)}}.
-
-
-to_disk_term({_Fd, Written}) ->
-    {ok, Written}.
-
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
deleted file mode 100644
index ea0cf69..0000000
--- a/src/couch/src/couch_btree.erl
+++ /dev/null
@@ -1,855 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree).
-
--export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
--export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
--export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
--export([extract/2, assemble/3, less/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(FILL_RATIO, 0.5).
-
-extract(#btree{extract_kv=undefined}, Value) ->
-    Value;
-extract(#btree{extract_kv=Extract}, Value) ->
-    Extract(Value).
-
-assemble(#btree{assemble_kv=undefined}, Key, Value) ->
-    {Key, Value};
-assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
-    Assemble(Key, Value).
-
-less(#btree{less=undefined}, A, B) ->
-    A < B;
-less(#btree{less=Less}, A, B) ->
-    Less(A, B).
-
-% pass in 'nil' for State when opening a new Btree.
-open(State, Fd) ->
-    {ok, #btree{root=State, fd=Fd}}.
-
-set_options(Bt, []) ->
-    Bt;
-set_options(Bt, [{split, Extract}|Rest]) ->
-    set_options(Bt#btree{extract_kv=Extract}, Rest);
-set_options(Bt, [{join, Assemble}|Rest]) ->
-    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
-set_options(Bt, [{less, Less}|Rest]) ->
-    set_options(Bt#btree{less=Less}, Rest);
-set_options(Bt, [{reduce, Reduce}|Rest]) ->
-    set_options(Bt#btree{reduce=Reduce}, Rest);
-set_options(Bt, [{compression, Comp}|Rest]) ->
-    set_options(Bt#btree{compression=Comp}, Rest).
-
-open(State, Fd, Options) ->
-    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
-
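-% Typical usage, matching the engine code earlier in this diff
-% (callback names and KVs invented): pass nil as the state for an
-% empty btree and wire in the split/join/reduce callbacks:
-%
-%     {ok, Bt0} = couch_btree:open(nil, Fd, [
-%         {split, fun split_kv/1},
-%         {join, fun join_kv/2},
-%         {reduce, fun reduce_kvs/2}
-%     ]),
-%     {ok, Bt1} = couch_btree:add(Bt0, KVs)
-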
-get_state(#btree{root=Root}) ->
-    Root.
-
-final_reduce(#btree{reduce=Reduce}, Val) ->
-    final_reduce(Reduce, Val);
-final_reduce(Reduce, {[], []}) ->
-    Reduce(reduce, []);
-final_reduce(_Reduce, {[], [Red]}) ->
-    Red;
-final_reduce(Reduce, {[], Reductions}) ->
-    Reduce(rereduce, Reductions);
-final_reduce(Reduce, {KVs, Reductions}) ->
-    Red = Reduce(reduce, KVs),
-    final_reduce(Reduce, {[], [Red | Reductions]}).
-
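-% A sketch of the two-phase contract (the counting fun is invented):
-% leaf KVs go through one reduce pass and the result is folded into
-% the pending rereductions:
-%
-%     Count = fun(reduce, KVs) -> length(KVs);
-%                (rereduce, Reds) -> lists:sum(Reds) end,
-%     final_reduce(Count, {[{k1, v1}], [3, 4]})
-%     %% => Count(rereduce, [1, 3, 4]) =:= 8
-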
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    StartKey = couch_util:get_value(start_key, Options),
-    InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
-    KeyGroupFun = get_group_fun(Bt, Options),
-    try
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
-            KeyGroupFun, Fun, Acc),
-        if GroupedKey2 == undefined ->
-            {ok, Acc2};
-        true ->
-            case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
-            {ok, Acc3} -> {ok, Acc3};
-            {stop, Acc3} -> {ok, Acc3}
-            end
-        end
-    catch
-        throw:{stop, AccDone} -> {ok, AccDone}
-    end.
-
-full_reduce(#btree{root=nil,reduce=Reduce}) ->
-    {ok, Reduce(reduce, [])};
-full_reduce(#btree{root=Root}) ->
-    {ok, element(2, Root)}.
-
-size(#btree{root = nil}) ->
-    0;
-size(#btree{root = {_P, _Red}}) ->
-    % pre 1.2 format
-    nil;
-size(#btree{root = {_P, _Red, Size}}) ->
-    Size.
-
-get_group_fun(Bt, Options) ->
-    case couch_util:get_value(key_group_level, Options) of
-        exact ->
-            make_group_fun(Bt, exact);
-        0 ->
-            fun(_, _) -> true end;
-        N when is_integer(N), N > 0 ->
-            make_group_fun(Bt, N);
-        undefined ->
-            couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end)
-    end.
-
-make_group_fun(Bt, exact) ->
-    fun({Key1, _}, {Key2, _}) ->
-        case less(Bt, {Key1, nil}, {Key2, nil}) of
-            false ->
-                case less(Bt, {Key2, nil}, {Key1, nil}) of
-                    false ->
-                        true;
-                    _ ->
-                        false
-                end;
-            _ ->
-                false
-        end
-    end;
-make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 ->
-    fun
-        GF({{p, Partition, Key1}, Val1}, {{p, Partition, Key2}, Val2}) ->
-            GF({Key1, Val1}, {Key2, Val2});
-        GF({[_|_] = Key1, _}, {[_|_] = Key2, _}) ->
-            SL1 = lists:sublist(Key1, GroupLevel),
-            SL2 = lists:sublist(Key2, GroupLevel),
-            case less(Bt, {SL1, nil}, {SL2, nil}) of
-                false ->
-                    case less(Bt, {SL2, nil}, {SL1, nil}) of
-                        false ->
-                            true;
-                        _ ->
-                            false
-                    end;
-                _ ->
-                    false
-            end;
-        GF({Key1, _}, {Key2, _}) ->
-            case less(Bt, {Key1, nil}, {Key2, nil}) of
-                false ->
-                    case less(Bt, {Key2, nil}, {Key1, nil}) of
-                        false ->
-                            true;
-                        _ ->
-                            false
-                    end;
-                _ ->
-                    false
-            end
-    end.
-
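-% For instance (keys invented), with key_group_level 1 the array keys
-% [<<"a">>, 1] and [<<"a">>, 2] compare equal on their first element
-% and land in the same group, while [<<"b">>, 1] does not:
-%
-%     GF = make_group_fun(Bt, 1),
-%     GF({[<<"a">>, 1], v}, {[<<"a">>, 2], v}),  %% => true
-%     GF({[<<"a">>, 1], v}, {[<<"b">>, 1], v})   %% => false
-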
-% wraps a 2 or 3 arity function with the proper 4 arity function
-convert_fun_arity(Fun) when is_function(Fun, 2) ->
-    fun
-        (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 3) ->
-    fun
-        (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 4) ->
-    Fun.    % Already arity 4
-
-make_key_in_end_range_function(Bt, fwd, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, LastKey, Key) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, Key, EndKey) end
-    end;
-make_key_in_end_range_function(Bt, rev, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, Key, LastKey) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, EndKey, Key) end
-    end.
-
-
-foldl(Bt, Fun, Acc) ->
-    fold(Bt, Fun, Acc, []).
-
-foldl(Bt, Fun, Acc, Options) ->
-    fold(Bt, Fun, Acc, Options).
-
-
-fold(#btree{root=nil}, _Fun, Acc, _Options) ->
-    {ok, {[], []}, Acc};
-fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    InRange = make_key_in_end_range_function(Bt, Dir, Options),
-    Result =
-    case couch_util:get_value(start_key, Options) of
-    undefined ->
-        stream_node(Bt, [], Bt#btree.root, InRange, Dir,
-                convert_fun_arity(Fun), Acc);
-    StartKey ->
-        stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
-                convert_fun_arity(Fun), Acc)
-    end,
-    case Result of
-    {ok, Acc2}->
-        FullReduction = element(2, Root),
-        {ok, {[], [FullReduction]}, Acc2};
-    {stop, LastReduction, Acc2} ->
-        {ok, LastReduction, Acc2}
-    end.
-
-add(Bt, InsertKeyValues) ->
-    add_remove(Bt, InsertKeyValues, []).
-
-add_remove(Bt, InsertKeyValues, RemoveKeys) ->
-    {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
-    {ok, Bt2}.
-
-query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
-    #btree{root=Root} = Bt,
-    InsertActions = lists:map(
-        fun(KeyValue) ->
-            {Key, Value} = extract(Bt, KeyValue),
-            {insert, Key, Value}
-        end, InsertValues),
-    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
-    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
-    SortFun =
-        fun({OpA, A, _}, {OpB, B, _}) ->
-            case A == B of
-            % A and B are equal, sort by op.
-            true -> op_order(OpA) < op_order(OpB);
-            false ->
-                less(Bt, A, B)
-            end
-        end,
-    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
-    {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
-    {ok, NewRoot} = complete_root(Bt, KeyPointers),
-    {ok, QueryResults, Bt#btree{root=NewRoot}}.
-
-% for ordering different operations with the same key.
-% fetch < remove < insert
-op_order(fetch) -> 1;
-op_order(remove) -> 2;
-op_order(insert) -> 3.
-
-lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
-    SortedKeys = case Less of
-        undefined -> lists:sort(Keys);
-        _ -> lists:sort(Less, Keys)
-    end,
-    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
-    % We want to return the results in the same order as the keys were input
-    % but we may have changed the order when we sorted. So we need to put the
-    % order back into the results.
-    couch_util:reorder_results(Keys, SortedResults).
-
-lookup(_Bt, nil, Keys) ->
-    {ok, [{Key, not_found} || Key <- Keys]};
-lookup(Bt, Node, Keys) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
-    kv_node ->
-        lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
-    end.
-
-lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
-    {Key, PointerInfo} = element(N, NodeTuple),
-    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
-    case lists:splitwith(SplitFun, LookupKeys) of
-    {[], GreaterQueries} ->
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
-    {LessEqQueries, GreaterQueries} ->
-        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
-    end.
-
-
-lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    % keys not found
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
-    {Key, Value} = element(N, NodeTuple),
-    case less(Bt, LookupKey, Key) of
-    true ->
-        % LookupKey is less than Key
-        lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
-    false ->
-        case less(Bt, Key, LookupKey) of
-        true ->
-            % LookupKey is greater than Key
-            lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
-        false ->
-            % LookupKey is equal to Key
-            lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
-        end
-    end.
-
-
-complete_root(_Bt, []) ->
-    {ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}])->
-    {ok, PointerInfo};
-complete_root(Bt, KPs) ->
-    {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
-    complete_root(Bt, ResultKeyPointers).
-
-%%%%%%%%%%%%% A note on chunkify %%%%%%%%%%%%%
-% It is inaccurate, as it does not account for compression applied when
-% blocks are written. The repeated byte_size(term_to_binary(...)) calls
-% behind ?term_size also make it fairly inefficient.
-
-chunkify(InList) ->
-    BaseChunkSize = get_chunk_size(),
-    case ?term_size(InList) of
-    Size when Size > BaseChunkSize ->
-        NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
-        ChunkThreshold = Size div NumberOfChunksLikely,
-        chunkify(InList, ChunkThreshold, [], 0, []);
-    _Else ->
-        [InList]
-    end.
-
-chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
-    lists:reverse(OutputChunks);
-chunkify([], _ChunkThreshold, [Item], _OutListSize, [PrevChunk | RestChunks]) ->
-    NewPrevChunk = PrevChunk ++ [Item],
-    lists:reverse(RestChunks, [NewPrevChunk]);
-chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
-    lists:reverse([lists:reverse(OutList) | OutputChunks]);
-chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
-    case ?term_size(InElement) of
-    Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
-        chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
-    Size ->
-        chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
-    end.
-
--compile({inline,[get_chunk_size/0]}).
-get_chunk_size() ->
-    try
-        list_to_integer(config:get("couchdb", "btree_chunk_size", "1279"))
-    catch error:badarg ->
-        1279
-    end.
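
The threshold math above aims for several similar-sized chunks rather than
greedily filled chunks plus a small remainder. Worked through with the
default btree_chunk_size of 1279 bytes and a node that serializes to 5000
bytes:

    Size = 5000, BaseChunkSize = 1279,
    NumberOfChunksLikely = (Size div BaseChunkSize) + 1,  % 4
    ChunkThreshold = Size div NumberOfChunksLikely.       % 1250
    %% Elements are packed until a chunk crosses ~1250 bytes, giving four
    %% even chunks instead of three full 1279-byte chunks plus a
    %% 1163-byte leftover.
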
-
-modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
-    {NodeType, NodeList} = case RootPointerInfo of
-    nil ->
-        {kv_node, []};
-    _Tuple ->
-        Pointer = element(1, RootPointerInfo),
-        get_node(Bt, Pointer)
-    end,
-    NodeTuple = list_to_tuple(NodeList),
-
-    {ok, NewNodeList, QueryOutput2} =
-    case NodeType of
-    kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
-    kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
-    end,
-    case NewNodeList of
-    [] ->  % no nodes remain
-        {ok, [], QueryOutput2};
-    NodeList ->  % nothing changed
-        {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-        {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
-    _Else2 ->
-        {ok, ResultList} = case RootPointerInfo of
-        nil ->
-            write_node(Bt, NodeType, NewNodeList);
-        _ ->
-            {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-            OldNode = {LastKey, RootPointerInfo},
-            write_node(Bt, OldNode, NodeType, NodeList, NewNodeList)
-        end,
-        {ok, ResultList, QueryOutput2}
-    end.
-
-reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
-    [];
-reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
-    R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
-    R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
-
-reduce_tree_size(kv_node, NodeSize, _KvList) ->
-    NodeSize;
-reduce_tree_size(kp_node, NodeSize, []) ->
-    NodeSize;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
-    % pre 1.2 format
-    nil;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
-    nil;
-reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
-    reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
-
-get_node(#btree{fd = Fd}, NodePos) ->
-    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
-    {NodeType, NodeList}.
-
-write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
-    % split up nodes into smaller sizes
-    NodeListList = chunkify(NodeList),
-    % now write out each chunk and return the KeyPointer pairs for those nodes
-    ResultList = [
-        begin
-            {ok, Pointer, Size} = couch_file:append_term(
-                Fd, {NodeType, ANodeList}, [{compression, Comp}]),
-            {LastKey, _} = lists:last(ANodeList),
-            SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
-            {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
-        end
-    ||
-        ANodeList <- NodeListList
-    ],
-    {ok, ResultList}.
-
-
-write_node(Bt, _OldNode, NodeType, [], NewList) ->
-    write_node(Bt, NodeType, NewList);
-write_node(Bt, _OldNode, NodeType, [_], NewList) ->
-    write_node(Bt, NodeType, NewList);
-write_node(Bt, OldNode, NodeType, OldList, NewList) ->
-    case can_reuse_old_node(OldList, NewList) of
-        {true, Prefix, Suffix} ->
-            {ok, PrefixKVs} = case Prefix of
-                [] -> {ok, []};
-                _ -> write_node(Bt, NodeType, Prefix)
-            end,
-            {ok, SuffixKVs} = case Suffix of
-                [] -> {ok, []};
-                _ -> write_node(Bt, NodeType, Suffix)
-            end,
-            Result = PrefixKVs ++ [OldNode] ++ SuffixKVs,
-            {ok, Result};
-        false ->
-            write_node(Bt, NodeType, NewList)
-    end.
-
-can_reuse_old_node(OldList, NewList) ->
-    {Prefix, RestNewList} = remove_prefix_kvs(hd(OldList), NewList),
-    case old_list_is_prefix(OldList, RestNewList, 0) of
-        {true, Size, Suffix} ->
-            ReuseThreshold = get_chunk_size() * ?FILL_RATIO,
-            if Size < ReuseThreshold -> false; true ->
-                {true, Prefix, Suffix}
-            end;
-        false ->
-            false
-    end.
-
-remove_prefix_kvs(KV1, [KV2 | Rest]) when KV2 < KV1 ->
-    {Prefix, RestNewList} = remove_prefix_kvs(KV1, Rest),
-    {[KV2 | Prefix], RestNewList};
-remove_prefix_kvs(_, RestNewList) ->
-    {[], RestNewList}.
-
-% No more KVs in the old node, so it's a prefix
-old_list_is_prefix([], Suffix, Size) ->
-    {true, Size, Suffix};
-% Some KVs have been removed from the old node
-old_list_is_prefix(_OldList, [], _Size) ->
-    false;
-% KV is equal in both old and new node so continue
-old_list_is_prefix([KV | Rest1], [KV | Rest2], Acc) ->
-    old_list_is_prefix(Rest1, Rest2, ?term_size(KV) + Acc);
-% KV mismatch between old and new node so not a prefix
-old_list_is_prefix(_OldList, _NewList, _Acc) ->
-    false.
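
Together, the three functions above implement a copy-avoidance check for
write_node/5: if the old node's entire KV list survives verbatim as a
contiguous run inside the new list, and that run is at least
get_chunk_size() * ?FILL_RATIO bytes, the old on-disk node is referenced
as-is and only the new prefix and suffix are written. A self-contained
sketch of the structural test (the byte-size threshold is omitted):

    Old = [{b,1}, {c,2}],
    New = [{a,0}, {b,1}, {c,2}, {d,3}],
    {Prefix, Rest} = lists:splitwith(fun(KV) -> KV < hd(Old) end, New),
    true   = lists:prefix(Old, Rest),
    Suffix = lists:nthtail(length(Old), Rest).
    %% Prefix = [{a,0}], Suffix = [{d,3}]; the node holding Old is reused.
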
-
-modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
-    modify_node(Bt, nil, Actions, QueryOutput);
-modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            tuple_size(NodeTuple), [])), QueryOutput};
-modify_kpnode(Bt, NodeTuple, LowerBound,
-        [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
-    Sz = tuple_size(NodeTuple),
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
-    case N =:= Sz of
-    true  ->
-        % perform remaining actions on last node
-        {_, PointerInfo} = element(Sz, NodeTuple),
-        {ok, ChildKPs, QueryOutput2} =
-            modify_node(Bt, PointerInfo, Actions, QueryOutput),
-        NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            Sz - 1, ChildKPs)),
-        {ok, NodeList, QueryOutput2};
-    false ->
-        {NodeKey, PointerInfo} = element(N, NodeTuple),
-        SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
-                not less(Bt, NodeKey, ActionKey)
-            end,
-        {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
-        {ok, ChildKPs, QueryOutput2} =
-                modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
-        ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
-                LowerBound, N - 1, ResultNode)),
-        modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
-    end.
-
-bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
-    Tail;
-bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-
-bounded_tuple_to_list(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
-bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
-    lists:reverse(Acc, Tail);
-bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
-
-find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
-    End;
-find_first_gteq(Bt, Tuple, Start, End, Key) ->
-    Mid = Start + ((End - Start) div 2),
-    {TupleKey, _} = element(Mid, Tuple),
-    case less(Bt, TupleKey, Key) of
-    true ->
-        find_first_gteq(Bt, Tuple, Mid+1, End, Key);
-    false ->
-        find_first_gteq(Bt, Tuple, Start, Mid, Key)
-    end.
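
find_first_gteq/5 is a binary search over the 1-based node tuple for the
first slot whose key is not less than Key. Note that it returns End when
every key sorts below Key, which callers such as modify_kpnode handle
explicitly (the N =:= Sz case above). A self-contained mirror with plain
term ordering:

    Find = fun F(_Tuple, Start, End, _Key) when Start == End ->
                   End;
               F(Tuple, Start, End, Key) ->
                   Mid = Start + ((End - Start) div 2),
                   {TupleKey, _} = element(Mid, Tuple),
                   case TupleKey < Key of
                       true  -> F(Tuple, Mid + 1, End, Key);
                       false -> F(Tuple, Start, Mid, Key)
                   end
           end,
    3 = Find({{a,1}, {c,2}, {e,3}}, 1, 3, d).   % {e,3} is the first >= d
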
-
-modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
-    case ActionType of
-    insert ->
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-    remove ->
-        % just drop the action
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
-    fetch ->
-        % the key/value must not exist in the tree
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-    end;
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
-    {Key, Value} = element(N, NodeTuple),
-    ResultNode =  bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
-    case less(Bt, ActionKey, Key) of
-    true ->
-        case ActionType of
-        insert ->
-            % ActionKey is less than the Key, so insert
-            modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-        remove ->
-            % ActionKey is less than the Key, just drop the action
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
-        fetch ->
-            % ActionKey is less than the Key, the key/value must not exist in the tree
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-        end;
-    false ->
-        % ActionKey and Key are maybe equal.
-        case less(Bt, Key, ActionKey) of
-        false ->
-            case ActionType of
-            insert ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-            remove ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
-            fetch ->
-                % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
-                % since an identical action key can follow it.
-                modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
-            end;
-        true ->
-            modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
-        end
-    end.
-
-
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    P = element(1, Node),
-    case get_node(Bt, P) of
-    {kp_node, NodeList} ->
-        NodeList2 = adjust_dir(Dir, NodeList),
-        reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
-    {kv_node, KVs} ->
-        KVs2 = adjust_dir(Dir, KVs),
-        reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
-    end.
-
-reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-
-    GTEKeyStartKVs =
-    case KeyStart of
-    undefined ->
-        KVs;
-    _ ->
-        DropFun = case Dir of
-        fwd ->
-            fun({Key, _}) -> less(Bt, Key, KeyStart) end;
-        rev ->
-            fun({Key, _}) -> less(Bt, KeyStart, Key) end
-        end,
-        lists:dropwhile(DropFun, KVs)
-    end,
-    KVs2 = lists:takewhile(
-        fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
-    reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-        _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    case GroupedKey of
-    undefined ->
-        reduce_stream_kv_node2(Bt, RestKVs, Key,
-                [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
-    _ ->
-
-        case KeyGroupFun(GroupedKey, Key) of
-        true ->
-            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
-                [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
-                Fun, Acc);
-        false ->
-            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
-            {ok, Acc2} ->
-                reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
-                    [], KeyGroupFun, Fun, Acc2);
-            {stop, Acc2} ->
-                throw({stop, Acc2})
-            end
-        end
-    end.
-
-reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-    Nodes =
-    case KeyStart of
-    undefined ->
-        NodeList;
-    _ ->
-        case Dir of
-        fwd ->
-            lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
-        rev ->
-            RevKPs = lists:reverse(NodeList),
-            case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
-            {_Before, []} ->
-                NodeList;
-            {Before, [FirstAfter | _]} ->
-                [FirstAfter | lists:reverse(Before)]
-            end
-        end
-    end,
-    {InRange, MaybeInRange} = lists:splitwith(
-        fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
-    NodesInRange = case MaybeInRange of
-    [FirstMaybeInRange | _] when Dir =:= fwd ->
-        InRange ++ [FirstMaybeInRange];
-    _ ->
-        InRange
-    end,
-    reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
-                        undefined, [], [], KeyGroupFun, Fun, Acc) ->
-    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
-                [], [], KeyGroupFun, Fun, Acc),
-    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
-            GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
-        KeyGroupFun(GroupedKey, Key) end, NodeList),
-    {GroupedNodes, UngroupedNodes} =
-    case Grouped0 of
-    [] ->
-        {Grouped0, Ungrouped0};
-    _ ->
-        [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
-        {RestGrouped, [FirstGrouped | Ungrouped0]}
-    end,
-    GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
-    case UngroupedNodes of
-    [{_Key, NodeInfo}|RestNodes] ->
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
-        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
-                GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-    [] ->
-        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
-    end.
-
-adjust_dir(fwd, List) ->
-    List;
-adjust_dir(rev, List) ->
-    lists:reverse(List).
-
-stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
-    end.
-
-stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
-    end.
-
-stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
-    Red = element(2, Node),
-    case Fun(traverse, Key, Red, Acc) of
-    {ok, Acc2} ->
-        case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
-        {ok, Acc3} ->
-            stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
-        {stop, LastReds, Acc3} ->
-            {stop, LastReds, Acc3}
-        end;
-    {skip, Acc2} ->
-        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
-    {stop, Acc2} ->
-        {stop, Reds, Acc2}
-    end.
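
stream_kp_node/7 shows the three verdicts the fold fun can return for an
inner node: {ok, Acc} descends into the subtree, {skip, Acc} prunes it
wholesale (while its reduction is still pushed onto Reds), and {stop, Acc}
ends the fold. A hypothetical traverse clause that prunes by key, assuming
binary keys:

    fun
        (traverse, LastKey, _Red, Acc) when LastKey < <<"m">> ->
            {skip, Acc};            % every key in this subtree =< LastKey
        (traverse, _LastKey, _Red, Acc) ->
            {ok, Acc};
        (visit, KV, _Reds, Acc) ->
            {ok, [KV | Acc]}
    end.
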
-
-drop_nodes(_Bt, Reds, _StartKey, []) ->
-    {Reds, []};
-drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
-    case less(Bt, NodeKey, StartKey) of
-    true ->
-        drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
-    false ->
-        {Reds, [{NodeKey, Node} | RestKPs]}
-    end.
-
-stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
-    {NewReds, NodesToStream} =
-    case Dir of
-    fwd ->
-        % drop all nodes sorting before the key
-        drop_nodes(Bt, Reds, StartKey, KPs);
-    rev ->
-        % keep all nodes sorting before the key, AND the first node to sort after
-        RevKPs = lists:reverse(KPs),
-         case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
-        {_RevsBefore, []} ->
-            % everything sorts before it
-            {Reds, KPs};
-        {RevBefore, [FirstAfter | Drop]} ->
-            {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
-                 [FirstAfter | lists:reverse(RevBefore)]}
-        end
-    end,
-    case NodesToStream of
-    [] ->
-        {ok, Acc};
-    [{_Key, Node} | Rest] ->
-        case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
-        {ok, Acc2} ->
-            Red = element(2, Node),
-            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
-        {stop, LastReds, Acc2} ->
-            {stop, LastReds, Acc2}
-        end
-    end.
-
-stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
-    DropFun =
-    case Dir of
-    fwd ->
-        fun({Key, _}) -> less(Bt, Key, StartKey) end;
-    rev ->
-        fun({Key, _}) -> less(Bt, StartKey, Key) end
-    end,
-    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
-    AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
-    stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
-
-stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
-    case InRange(K) of
-    false ->
-        {stop, {PrevKVs, Reds}, Acc};
-    true ->
-        AssembledKV = assemble(Bt, K, V),
-        case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
-        {ok, Acc2} ->
-            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
-        {stop, Acc2} ->
-            {stop, {PrevKVs, Reds}, Acc2}
-        end
-    end.
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
deleted file mode 100644
index 6e9294a..0000000
--- a/src/couch/src/couch_changes.erl
+++ /dev/null
@@ -1,724 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes).
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([
-    handle_db_changes/3,
-    get_changes_timeout/2,
-    wait_updated/3,
-    get_rest_updated/1,
-    configure_filter/4,
-    filter/3,
-    handle_db_event/3,
-    handle_view_event/3,
-    send_changes_doc_ids/6,
-    send_changes_design_docs/6
-]).
-
--export([changes_enumerator/2]).
-
-%% export so we can use fully qualified call to facilitate hot-code upgrade
--export([
-    keep_sending_changes/3
-]).
-
--record(changes_acc, {
-    db,
-    seq,
-    prepend,
-    filter,
-    callback,
-    user_acc,
-    resp_type,
-    limit,
-    include_docs,
-    doc_options,
-    conflicts,
-    timeout,
-    timeout_fun,
-    aggregation_kvs,
-    aggregation_results
-}).
-
-handle_db_changes(Args0, Req, Db0) ->
-    #changes_args{
-        style = Style,
-        filter = FilterName,
-        feed = Feed,
-        dir = Dir,
-        since = Since
-    } = Args0,
-    Filter = configure_filter(FilterName, Style, Req, Db0),
-    Args = Args0#changes_args{filter_fun = Filter},
-    DbName = couch_db:name(Db0),
-    StartListenerFun = fun() ->
-        couch_event:link_listener(
-            ?MODULE, handle_db_event, self(), [{dbname, DbName}]
-        )
-    end,
-    Start = fun() ->
-        {ok, Db} = couch_db:reopen(Db0),
-        StartSeq = case Dir of
-        rev ->
-            couch_db:get_update_seq(Db);
-        fwd ->
-            Since
-        end,
-        {Db, StartSeq}
-    end,
-    % Start the heartbeat timer so heartbeats still fire if the filter function fails
-    case Args#changes_args.heartbeat of
-    undefined ->
-        erlang:erase(last_changes_heartbeat);
-    Val when is_integer(Val); Val =:= true ->
-        put(last_changes_heartbeat, os:timestamp())
-    end,
-
-    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
-    true ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            {ok, Listener} = StartListenerFun(),
-
-            {Db, StartSeq} = Start(),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
-                             <<"">>, Timeout, TimeoutFun),
-            try
-                keep_sending_changes(
-                    Args#changes_args{dir=fwd},
-                    Acc0,
-                    true)
-            after
-                couch_event:stop_listener(Listener),
-                get_rest_updated(ok) % clean out any remaining update messages
-            end
-        end;
-    false ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            {Db, StartSeq} = Start(),
-            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
-                             UserAcc2, Db, StartSeq, <<>>,
-                             Timeout, TimeoutFun),
-            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
-                send_changes(
-                    Acc0,
-                    Dir,
-                    true),
-            end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
-        end
-    end.
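
handle_db_changes/3 thus returns a fun over {Callback, UserAcc}: the
continuous, longpoll, and eventsource feeds attach a couch_event listener
and loop via keep_sending_changes/3, while every other feed makes a single
send_changes/3 pass. A hypothetical caller sketch, with the arity-3
callback shape matching the call sites in this module (event, feed type,
user accumulator):

    Callback = fun
        (start, _Feed, Acc)                   -> Acc;
        ({change, Row, _Prepend}, _Feed, Acc) -> [Row | Acc];
        (timeout, _Feed, Acc)                 -> Acc;
        ({stop, _EndSeq}, _Feed, Acc)         -> Acc
    end,
    ChangesFun = couch_changes:handle_db_changes(Args, Req, Db),
    Rows = ChangesFun({Callback, []}).
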
-
-
-handle_db_event(_DbName, updated, Parent) ->
-    Parent ! updated,
-    {ok, Parent};
-handle_db_event(_DbName, deleted, Parent) ->
-    Parent ! deleted,
-    {ok, Parent};
-handle_db_event(_DbName, _Event, Parent) ->
-    {ok, Parent}.
-
-
-handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
-    case Msg of
-        {index_commit, DDocId} ->
-            Parent ! updated;
-        {index_delete, DDocId} ->
-            Parent ! deleted;
-        _ ->
-            ok
-    end,
-    {ok, {Parent, DDocId}}.
-
-get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
-    Pair;
-get_callback_acc(Callback) when is_function(Callback, 2) ->
-    {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
-
-configure_filter("_doc_ids", Style, Req, _Db) ->
-    {doc_ids, Style, get_doc_ids(Req)};
-configure_filter("_selector", Style, Req, _Db) ->
-    {selector, Style,  get_selector_and_fields(Req)};
-configure_filter("_design", Style, _Req, _Db) ->
-    {design_docs, Style};
-configure_filter("_view", Style, Req, Db) ->
-    ViewName = get_view_qs(Req),
-    if ViewName /= "" -> ok; true ->
-        throw({bad_request, "`view` filter parameter is not provided."})
-    end,
-    ViewNameParts = string:tokens(ViewName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
-        [DName, VName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"views">>, VName]),
-            case couch_db:is_clustered(Db) of
-                true ->
-                    DIR = fabric_util:doc_id_and_rev(DDoc),
-                    {fetch, view, Style, DIR, VName};
-                false ->
-                    {view, Style, DDoc, VName}
-            end;
-        [] ->
-            Msg = "`view` must be of the form `designname/viewname`",
-            throw({bad_request, Msg})
-    end;
-configure_filter([$_ | _], _Style, _Req, _Db) ->
-    throw({bad_request, "unknown builtin filter name"});
-configure_filter("", main_only, _Req, _Db) ->
-    {default, main_only};
-configure_filter("", all_docs, _Req, _Db) ->
-    {default, all_docs};
-configure_filter(FilterName, Style, Req, Db) ->
-    FilterNameParts = string:tokens(FilterName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
-        [DName, FName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"filters">>, FName]),
-            case couch_db:is_clustered(Db) of
-                true ->
-                    DIR = fabric_util:doc_id_and_rev(DDoc),
-                    {fetch, custom, Style, Req, DIR, FName};
-                false->
-                    {custom, Style, Req, DDoc, FName}
-            end;
-
-        [] ->
-            {default, Style};
-        _Else ->
-            Msg = "`filter` must be of the form `designname/filtername`",
-            throw({bad_request, Msg})
-    end.
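
In short: "" selects the default filter, the "_"-prefixed names select
builtins (any other "_" name is rejected), and everything else must be a
"designname/filtername" pair that resolves through the design doc. A
self-contained sketch of that parse, with couch_httpd:unquote/1 omitted:

    [DName, FName] = [list_to_binary(P)
                      || P <- string:tokens("myddoc/myfilter", "/")],
    DDocId = <<"_design/", DName/binary>>.
    %% DDocId =:= <<"_design/myddoc">>, FName =:= <<"myfilter">>
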
-
-
-filter(Db, #full_doc_info{}=FDI, Filter) ->
-    filter(Db, couch_doc:to_doc_info(FDI), Filter);
-filter(_Db, DocInfo, {default, Style}) ->
-    apply_style(DocInfo, Style);
-filter(_Db, DocInfo, {doc_ids, Style, DocIds}) ->
-    case lists:member(DocInfo#doc_info.id, DocIds) of
-        true ->
-            apply_style(DocInfo, Style);
-        false ->
-            []
-    end;
-filter(Db, DocInfo, {selector, Style, {Selector, _Fields}}) ->
-    Docs = open_revs(Db, DocInfo, Style),
-    Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
-        || Doc <- Docs],
-    filter_revs(Passes, Docs);
-filter(_Db, DocInfo, {design_docs, Style}) ->
-    case DocInfo#doc_info.id of
-        <<"_design", _/binary>> ->
-            apply_style(DocInfo, Style);
-        _ ->
-            []
-    end;
-filter(Db, DocInfo, {view, Style, DDoc, VName}) ->
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
-    filter_revs(Passes, Docs);
-filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) ->
-    Req = case Req0 of
-        {json_req, _} -> Req0;
-        #httpd{} -> {json_req, couch_httpd_external:json_req_obj(Req0, Db)}
-    end,
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
-    filter_revs(Passes, Docs).
-
-
-get_view_qs({json_req, {Props}}) ->
-    {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
-    binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
-get_view_qs(Req) ->
-    couch_httpd:qs_value(Req, "view", "").
-
-get_doc_ids({json_req, {Props}}) ->
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {Props} = couch_httpd:json_body_obj(Req),
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='GET'}=Req) ->
-    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
-    check_docids(DocIds);
-get_doc_ids(_) ->
-    throw({bad_request, no_doc_ids_provided}).
-
-
-get_selector_and_fields({json_req, {Props}}) ->
-    Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
-    Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
-    {Selector, Fields};
-get_selector_and_fields(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    get_selector_and_fields({json_req,  couch_httpd:json_body_obj(Req)});
-get_selector_and_fields(_) ->
-    throw({bad_request, "Selector must be specified in POST payload"}).
-
-
-check_docids(DocIds) when is_list(DocIds) ->
-    lists:foreach(fun
-        (DocId) when not is_binary(DocId) ->
-            Msg = "`doc_ids` filter parameter is not a list of doc ids.",
-            throw({bad_request, Msg});
-        (_) -> ok
-    end, DocIds),
-    DocIds;
-check_docids(_) ->
-    Msg = "`doc_ids` filter parameter is not a list of doc ids.",
-    throw({bad_request, Msg}).
-
-
-check_selector(Selector={_}) ->
-    try
-        mango_selector:normalize(Selector)
-    catch
-        {mango_error, Mod, Reason0} ->
-            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
-            throw({bad_request, Reason})
-    end;
-check_selector(_Selector) ->
-    throw({bad_request, "Selector error: expected a JSON object"}).
-
-
-check_fields(nil) ->
-    nil;
-check_fields(Fields) when is_list(Fields) ->
-    try
-        {ok, Fields1} = mango_fields:new(Fields),
-        Fields1
-    catch
-        {mango_error, Mod, Reason0} ->
-            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
-            throw({bad_request, Reason})
-    end;
-check_fields(_Fields) ->
-    throw({bad_request, "Selector error: fields must be JSON array"}).
-
-
-open_ddoc(Db, DDocId) ->
-    DbName = couch_db:name(Db),
-    case couch_db:is_clustered(Db) of
-        true ->
-            case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
-                {ok, _} = Resp -> Resp;
-                Else -> throw(Else)
-            end;
-        false ->
-            case couch_db:open_doc(Db, DDocId, [ejson_body]) of
-                {ok, _} = Resp -> Resp;
-                Else -> throw(Else)
-            end
-    end.
-
-
-check_member_exists(#doc{body={Props}}, Path) ->
-    couch_util:get_nested_json_value({Props}, Path).
-
-
-apply_style(#doc_info{revs=Revs}, main_only) ->
-    [#rev_info{rev=Rev} | _] = Revs,
-    [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-apply_style(#doc_info{revs=Revs}, all_docs) ->
-    [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev=R} <- Revs].
-
-
-open_revs(Db, DocInfo, Style) ->
-    DocInfos = case Style of
-        main_only -> [DocInfo];
-        all_docs -> [DocInfo#doc_info{revs=[R]}|| R <- DocInfo#doc_info.revs]
-    end,
-    OpenOpts = [deleted, conflicts],
-    % Relying on list comprehensions to silence errors
-    OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos],
-    [Doc || {ok, Doc} <- OpenResults].
-
-
-filter_revs(Passes, Docs) ->
-    lists:flatmap(fun
-        ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
-            RevStr = couch_doc:rev_to_str({RevPos, RevId}),
-            Change = {[{<<"rev">>, RevStr}]},
-            [Change];
-        (_) ->
-            []
-    end, lists:zip(Passes, Docs)).
-
-
-get_changes_timeout(Args, Callback) ->
-    #changes_args{
-        heartbeat = Heartbeat,
-        timeout = Timeout,
-        feed = ResponseType
-    } = Args,
-    DefaultTimeout = list_to_integer(
-        config:get("httpd", "changes_timeout", "60000")
-    ),
-    case Heartbeat of
-    undefined ->
-        case Timeout of
-        undefined ->
-            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
-        infinity ->
-            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
-        _ ->
-            {lists:min([DefaultTimeout, Timeout]),
-                fun(UserAcc) -> {stop, UserAcc} end}
-        end;
-    true ->
-        {DefaultTimeout,
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
-    _ ->
-        {lists:min([DefaultTimeout, Heartbeat]),
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
-    end.
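
Spelled out, with the default httpd/changes_timeout of 60000 ms (a
{stop, _} return from the timeout fun ends the wait loop, {ok, _} keeps it
alive):

    %% heartbeat=undefined, timeout=undefined -> wait 60000 ms, then stop
    %% heartbeat=undefined, timeout=infinity  -> wait forever
    %% heartbeat=undefined, timeout=15000     -> wait min(60000, 15000) ms,
    %%                                           then stop
    %% heartbeat=true                         -> fire the timeout callback
    %%                                           every 60000 ms, keep going
    %% heartbeat=5000                         -> fire every min(60000, 5000)
    %%                                           ms, keep going
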
-
-start_sending_changes(_Callback, UserAcc, ResponseType)
-        when ResponseType =:= "continuous"
-        orelse ResponseType =:= "eventsource" ->
-    UserAcc;
-start_sending_changes(Callback, UserAcc, ResponseType) ->
-    Callback(start, ResponseType, UserAcc).
-
-build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
-    #changes_args{
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        limit = Limit,
-        feed = ResponseType,
-        filter_fun = Filter
-    } = Args,
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        prepend = Prepend,
-        filter = Filter,
-        callback = Callback,
-        user_acc = UserAcc,
-        resp_type = ResponseType,
-        limit = Limit,
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        timeout = Timeout,
-        timeout_fun = TimeoutFun,
-        aggregation_results=[],
-        aggregation_kvs=[]
-    }.
-
-send_changes(Acc, Dir, FirstRound) ->
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        filter = Filter
-    } = maybe_upgrade_changes_acc(Acc),
-    DbEnumFun = fun changes_enumerator/2,
-    case can_optimize(FirstRound, Filter) of
-        {true, Fun} ->
-            Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
-        _ ->
-            Opts = [{dir, Dir}],
-            couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
-    end.
-
-
-can_optimize(true, {doc_ids, _Style, DocIds}) ->
-    MaxDocIds = config:get_integer("couchdb",
-        "changes_doc_ids_optimization_threshold", 100),
-    if length(DocIds) =< MaxDocIds ->
-        {true, fun send_changes_doc_ids/6};
-    true ->
-        false
-    end;
-can_optimize(true, {design_docs, _Style}) ->
-    {true, fun send_changes_design_docs/6};
-can_optimize(_, _) ->
-    false.
-
-
-send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
-    Results = couch_db:get_full_doc_infos(Db, DocIds),
-    FullInfos = lists:foldl(fun
-        (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
-        (not_found, Acc) -> Acc
-    end, [], Results),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
-    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
-    Opts = [
-        include_deleted,
-        {start_key, <<"_design/">>},
-        {end_key_gt, <<"_design0">>}
-    ],
-    {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
-    FoldFun = case Dir of
-        fwd -> fun lists:foldl/3;
-        rev -> fun lists:foldr/3
-    end,
-    GreaterFun = case Dir of
-        fwd -> fun(A, B) -> A > B end;
-        rev -> fun(A, B) -> A =< B end
-    end,
-    DocInfos = lists:foldl(fun(FDI, Acc) ->
-        DI = couch_doc:to_doc_info(FDI),
-        case GreaterFun(DI#doc_info.high_seq, StartSeq) of
-            true -> [DI | Acc];
-            false -> Acc
-        end
-    end, [], FullDocInfos),
-    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
-    FinalAcc = try
-        FoldFun(fun(DocInfo, Acc) ->
-            case Fun(DocInfo, Acc) of
-                {ok, NewAcc} ->
-                    NewAcc;
-                {stop, NewAcc} ->
-                    throw({stop, NewAcc})
-            end
-        end, Acc0, SortedDocInfos)
-    catch
-        {stop, Acc} -> Acc
-    end,
-    case Dir of
-        fwd ->
-            FinalAcc0 = case element(1, FinalAcc) of
-                changes_acc -> % we came here via couch_http or internal call
-                    FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)};
-                fabric_changes_acc -> % we came here via chttpd / fabric / rexi
-                    FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
-            end,
-            {ok, FinalAcc0};
-        rev -> {ok, FinalAcc}
-    end.
-
-
-keep_sending_changes(Args, Acc0, FirstRound) ->
-    #changes_args{
-        feed = ResponseType,
-        limit = Limit,
-        db_open_options = DbOptions
-    } = Args,
-
-    {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
-
-    #changes_acc{
-        db = Db, callback = Callback,
-        timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
-        prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
-    } = maybe_upgrade_changes_acc(ChangesAcc),
-
-    couch_db:close(Db),
-    if Limit > NewLimit, ResponseType == "longpoll" ->
-        end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
-    true ->
-        case wait_updated(Timeout, TimeoutFun, UserAcc2) of
-        {updated, UserAcc4} ->
-            DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
-            case couch_db:open(couch_db:name(Db), DbOptions1) of
-            {ok, Db2} ->
-                ?MODULE:keep_sending_changes(
-                  Args#changes_args{limit=NewLimit},
-                  ChangesAcc#changes_acc{
-                    db = Db2,
-                    user_acc = UserAcc4,
-                    seq = EndSeq,
-                    prepend = Prepend2,
-                    timeout = Timeout,
-                    timeout_fun = TimeoutFun},
-                  false);
-            _Else ->
-                end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
-            end;
-        {stop, UserAcc4} ->
-            end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
-        end
-    end.
-
-end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
-    Callback({stop, EndSeq}, ResponseType, UserAcc).
-
-changes_enumerator(Value, Acc) ->
-    #changes_acc{
-        filter = Filter, callback = Callback, prepend = Prepend,
-        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
-        timeout = Timeout, timeout_fun = TimeoutFun
-    } = maybe_upgrade_changes_acc(Acc),
-    Results0 = filter(Db, Value, Filter),
-    Results = [Result || Result <- Results0, Result /= null],
-    Seq = case Value of
-        #full_doc_info{} ->
-            Value#full_doc_info.update_seq;
-        #doc_info{} ->
-            Value#doc_info.high_seq
-    end,
-    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
-    case Results of
-    [] ->
-        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
-        case Done of
-        stop ->
-            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
-        ok ->
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
-        end;
-    _ ->
-        if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
-            ChangesRow = changes_row(Results, Value, Acc),
-            UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
-            reset_heartbeat(),
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}};
-        true ->
-            ChangesRow = changes_row(Results, Value, Acc),
-            UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
-            reset_heartbeat(),
-            {Go, Acc#changes_acc{
-                seq = Seq, prepend = <<",\n">>,
-                user_acc = UserAcc2, limit = Limit - 1}}
-        end
-    end.
-
-
-
-changes_row(Results, #full_doc_info{} = FDI, Acc) ->
-    changes_row(Results, couch_doc:to_doc_info(FDI), Acc);
-changes_row(Results, DocInfo, Acc0) ->
-    Acc = maybe_upgrade_changes_acc(Acc0),
-    #doc_info{
-        id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
-    } = DocInfo,
-    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
-        deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)}.
-
-maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
-    #changes_acc{
-        db = Db,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        filter = Filter
-    } = Acc,
-    Opts = case Conflicts of
-               true -> [deleted, conflicts];
-               false -> [deleted]
-           end,
-    load_doc(Db, Value, Opts, DocOpts, Filter);
-
-maybe_get_changes_doc(_Value, _Acc) ->
-    [].
-
-
-load_doc(Db, Value, Opts, DocOpts, Filter) ->
-    case couch_index_util:load_doc(Db, Value, Opts) of
-        null ->
-            [{doc, null}];
-        Doc ->
-            [{doc, doc_to_json(Doc, DocOpts, Filter)}]
-    end.
-
-
-doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
-    when Fields =/= nil ->
-    mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
-doc_to_json(Doc, DocOpts, _Filter) ->
-    couch_doc:to_json_obj(Doc, DocOpts).
-
-
-deleted_item(true) -> [{<<"deleted">>, true}];
-deleted_item(_) -> [].
-
-% Waits for an 'updated' message; if multiple messages are queued, collects them all.
-wait_updated(Timeout, TimeoutFun, UserAcc) ->
-    receive
-    updated ->
-        get_rest_updated(UserAcc);
-    deleted ->
-        {stop, UserAcc}
-    after Timeout ->
-        {Go, UserAcc2} = TimeoutFun(UserAcc),
-        case Go of
-        ok ->
-            ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
-        stop ->
-            {stop, UserAcc2}
-        end
-    end.
-
-get_rest_updated(UserAcc) ->
-    receive
-    updated ->
-        get_rest_updated(UserAcc)
-    after 0 ->
-        {updated, UserAcc}
-    end.
-
-reset_heartbeat() ->
-    case get(last_changes_heartbeat) of
-    undefined ->
-        ok;
-    _ ->
-        put(last_changes_heartbeat, os:timestamp())
-    end.
-
-maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
-    Before = get(last_changes_heartbeat),
-    case Before of
-    undefined ->
-        {ok, Acc};
-    _ ->
-        Now = os:timestamp(),
-        case timer:now_diff(Now, Before) div 1000 >= Timeout of
-        true ->
-            Acc2 = TimeoutFun(Acc),
-            put(last_changes_heartbeat, Now),
-            Acc2;
-        false ->
-            {ok, Acc}
-        end
-    end.
-
-
-maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
-    Acc;
-maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
-    #changes_acc{
-        db = element(2, Acc),
-        seq = element(6, Acc),
-        prepend = element(7, Acc),
-        filter = element(8, Acc),
-        callback = element(9, Acc),
-        user_acc = element(10, Acc),
-        resp_type = element(11, Acc),
-        limit = element(12, Acc),
-        include_docs = element(13, Acc),
-        doc_options = element(14, Acc),
-        conflicts = element(15, Acc),
-        timeout = element(16, Acc),
-        timeout_fun = element(17, Acc),
-        aggregation_kvs = element(18, Acc),
-        aggregation_results = element(19, Acc)
-    }.
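
maybe_upgrade_changes_acc/1 is the hot-code-upgrade shim hinted at by the
fully qualified calls above: an accumulator captured by an older version
of this module is a 19-element tuple with a different field layout, and
the element/2 calls re-map it into the current #changes_acc{}. A generic,
self-contained sketch of the pattern (not CouchDB API):

    -module(acc_upgrade_sketch).
    -export([maybe_upgrade/1]).

    -record(acc, {count, label}).

    maybe_upgrade(#acc{} = Acc) ->
        Acc;
    maybe_upgrade(Old) when tuple_size(Old) == 4 ->
        %% Old layout: {acc, Count, RetiredField, Label}; records are
        %% plain tagged tuples, so element/2 addresses the old fields.
        #acc{count = element(2, Old), label = element(4, Old)}.
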
diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl
deleted file mode 100644
index cfcc2a4..0000000
--- a/src/couch/src/couch_compress.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compress).
-
--export([compress/2, decompress/1, is_compressed/2]).
--export([get_compression_method/0]).
--export([uncompressed_size/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% binaries compressed with snappy have their first byte set to this value
--define(SNAPPY_PREFIX, 1).
-% Term prefixes documented at:
-%      http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
--define(TERM_PREFIX, 131).
--define(COMPRESSED_TERM_PREFIX, 131, 80).
-
-
-get_compression_method() ->
-    case config:get("couchdb", "file_compression") of
-    undefined ->
-        ?DEFAULT_COMPRESSION;
-    Method1 ->
-        case string:tokens(Method1, "_") of
-        [Method] ->
-            list_to_existing_atom(Method);
-        [Method, Level] ->
-            {list_to_existing_atom(Method), list_to_integer(Level)}
-        end
-    end.
-
-
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
-    Bin;
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(<<?COMPRESSED_TERM_PREFIX, _/binary>> = Bin, {deflate, _Level}) ->
-    Bin;
-compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(Term, none) ->
-    ?term_to_bin(Term);
-compress(Term, {deflate, Level}) ->
-    term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
-compress(Term, snappy) ->
-    Bin = ?term_to_bin(Term),
-    try
-        {ok, CompressedBin} = snappy:compress(Bin),
-        <<?SNAPPY_PREFIX, CompressedBin/binary>>
-    catch exit:snappy_nif_not_loaded ->
-        Bin
-    end.
-
-
-decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, TermBin} = snappy:decompress(Rest),
-    binary_to_term(TermBin);
-decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    binary_to_term(Bin);
-decompress(_) ->
-    error(invalid_compression).
-
-
-is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
-    Method =:= snappy;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
-    true;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
-    false;
-is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
-    Method =:= none;
-is_compressed(Term, _Method) when not is_binary(Term) ->
-    false;
-is_compressed(_, _) ->
-    error(invalid_compression).
-
-
-uncompressed_size(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, Size} = snappy:uncompressed_length(Rest),
-    Size;
-uncompressed_size(<<?COMPRESSED_TERM_PREFIX, Size:32, _/binary>> = _Bin) ->
-    % See http://erlang.org/doc/apps/erts/erl_ext_dist.html
-    % The uncompressed binary would be encoded with <<131, Rest/binary>>
-    % so need to add 1 for 131
-    Size + 1;
-uncompressed_size(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    byte_size(Bin);
-uncompressed_size(_) ->
-    error(invalid_compression).
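
The whole module dispatches on a leading tag byte: snappy output is
wrapped as <<1, Compressed/binary>>, while plain and deflated terms start
with the external-term-format byte 131, the 131,80 pair marking the
compressed form whose next four bytes hold the uncompressed length. A
stdlib-only illustration of those tags:

    Plain = term_to_binary({doc, <<"x">>}, [{minor_version, 1}]),
    <<131, _/binary>> = Plain,                    % ?TERM_PREFIX
    Deflated = term_to_binary(lists:duplicate(1000, $a),
                              [{minor_version, 1}, {compressed, 6}]),
    <<131, 80, Size:32, _/binary>> = Deflated.    % ?COMPRESSED_TERM_PREFIX
    %% uncompressed_size/1 reports Size + 1, since the 32-bit length
    %% excludes the leading 131 byte.
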
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
deleted file mode 100644
index 6587205..0000000
--- a/src/couch/src/couch_db.erl
+++ /dev/null
@@ -1,2086 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db).
-
--export([
-    create/2,
-    open/2,
-    open_int/2,
-    incref/1,
-    reopen/1,
-    close/1,
-
-    clustered_db/2,
-    clustered_db/3,
-
-    monitor/1,
-    monitored_by/1,
-    is_idle/1,
-
-    is_admin/1,
-    check_is_admin/1,
-    check_is_member/1,
-
-    name/1,
-    get_after_doc_read_fun/1,
-    get_before_doc_update_fun/1,
-    get_committed_update_seq/1,
-    get_compacted_seq/1,
-    get_compactor_pid/1,
-    get_compactor_pid_sync/1,
-    get_db_info/1,
-    get_partition_info/2,
-    get_del_doc_count/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_filepath/1,
-    get_instance_start_time/1,
-    get_pid/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_update_seq/1,
-    get_user_ctx/1,
-    get_uuid/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-
-    is_db/1,
-    is_system_db/1,
-    is_clustered/1,
-    is_system_db_name/1,
-    is_partitioned/1,
-
-    set_revs_limit/2,
-    set_purge_infos_limit/2,
-    set_security/2,
-    set_user_ctx/2,
-
-    load_validation_funs/1,
-    reload_validation_funs/1,
-
-    open_doc/2,
-    open_doc/3,
-    open_doc_revs/4,
-    open_doc_int/3,
-    get_doc_info/2,
-    get_full_doc_info/2,
-    get_full_doc_infos/2,
-    get_missing_revs/2,
-    get_design_doc/2,
-    get_design_docs/1,
-    get_design_doc_count/1,
-    get_purge_infos/2,
-
-    get_minimum_purge_seq/1,
-    purge_client_exists/3,
-
-    validate_docid/2,
-    doc_from_json_obj_validate/2,
-
-    update_doc/3,
-    update_doc/4,
-    update_docs/4,
-    update_docs/2,
-    update_docs/3,
-    delete_doc/3,
-
-    purge_docs/2,
-    purge_docs/3,
-
-    with_stream/3,
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/3,
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_design_docs/4,
-    fold_changes/4,
-    fold_changes/5,
-    count_changes_since/2,
-    fold_purge_infos/4,
-    fold_purge_infos/5,
-
-    calculate_start_seq/3,
-    owner_of/2,
-
-    start_compact/1,
-    cancel_compact/1,
-    wait_for_compaction/1,
-    wait_for_compaction/2,
-
-    dbname_suffix/1,
-    normalize_dbname/1,
-    validate_dbname/1,
-
-    make_doc/5,
-    new_revid/1
-]).
-
-
--export([
-    start_link/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(DBNAME_REGEX,
-    "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
-    "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
-).
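
So the define takes the stock database-name rule and permits an optional
ten-or-more-digit shard timestamp suffix. Illustrative matches:

    Re = "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*(\\.[0-9]{10,})?$",
    match   = re:run("db_name+with(specials)", Re, [{capture, none}]),
    match   = re:run("dbname.1617311244", Re, [{capture, none}]),
    nomatch = re:run("_users", Re, [{capture, none}]),     % must start a-z
    nomatch = re:run("dbname.123", Re, [{capture, none}]). % timestamp too short
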
-
-start_link(Engine, DbName, Filepath, Options) ->
-    Arg = {Engine, DbName, Filepath, Options},
-    proc_lib:start_link(couch_db_updater, init, [Arg]).
-
-create(DbName, Options) ->
-    couch_server:create(DbName, Options).
-
-% This is for opening a database for internal purposes like the replicator
-% or the view indexer. It never throws a reader error.
-open_int(DbName, Options) ->
-    couch_server:open(DbName, Options).
-
-% This should be called any time an HTTP request opens the database.
-% It ensures that the HTTP userCtx is a valid reader.
-open(DbName, Options) ->
-    case couch_server:open(DbName, Options) of
-        {ok, Db} ->
-            try
-                check_is_member(Db),
-                {ok, Db}
-            catch
-                throw:Error ->
-                    close(Db),
-                    throw(Error)
-            end;
-        Else -> Else
-    end.
-
-
-reopen(#db{} = Db) ->
-    % We could have just swapped out the storage engine
-    % for this database during a compaction so we just
-    % reimplement this as a close/open pair now.
-    try
-        open(Db#db.name, [{user_ctx, Db#db.user_ctx} | Db#db.options])
-    after
-        close(Db)
-    end.
-
-
-% You shouldn't call this. It's part of the ref counting between
-% couch_server and couch_db instances.
-incref(#db{} = Db) ->
-    couch_db_engine:incref(Db).
-
-clustered_db(DbName, Options) when is_list(Options) ->
-    UserCtx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-    SecProps = couch_util:get_value(security, Options, []),
-    Props = couch_util:get_value(props, Options, []),
-    {ok, #db{
-        name = DbName,
-        user_ctx = UserCtx,
-        security = SecProps,
-        options = [{props, Props}]
-    }};
-
-clustered_db(DbName, #user_ctx{} = UserCtx) ->
-    clustered_db(DbName, [{user_ctx, UserCtx}]).
-
-clustered_db(DbName, UserCtx, SecProps) ->
-    clustered_db(DbName, [{user_ctx, UserCtx}, {security, SecProps}]).
-
-is_db(#db{}) ->
-    true;
-is_db(_) ->
-    false.
-
-is_system_db(#db{options = Options}) ->
-    lists:member(sys_db, Options).
-
-is_clustered(#{}) ->
-    true;
-is_clustered(#db{main_pid = nil}) ->
-    true;
-is_clustered(#db{}) ->
-    false;
-is_clustered(?OLD_DB_REC = Db) ->
-    ?OLD_DB_MAIN_PID(Db) == undefined.
-
-is_partitioned(#db{options = Options}) ->
-    Props = couch_util:get_value(props, Options, []),
-    couch_util:get_value(partitioned, Props, false).
-
-close(#db{} = Db) ->
-    ok = couch_db_engine:decref(Db);
-close(?OLD_DB_REC) ->
-    ok.
-
-is_idle(#db{compactor_pid=nil} = Db) ->
-    monitored_by(Db) == [];
-is_idle(_Db) ->
-    false.
-
-monitored_by(Db) ->
-    case couch_db_engine:monitored_by(Db) of
-        Pids when is_list(Pids) ->
-            PidTracker = whereis(couch_stats_process_tracker),
-            Pids -- [Db#db.main_pid, PidTracker];
-        undefined ->
-            []
-    end.
-
-
-monitor(#db{main_pid=MainPid}) ->
-    erlang:monitor(process, MainPid).
-
-start_compact(#db{} = Db) ->
-    gen_server:call(Db#db.main_pid, start_compact).
-
-cancel_compact(#db{main_pid=Pid}) ->
-    gen_server:call(Pid, cancel_compact).
-
-wait_for_compaction(Db) ->
-    wait_for_compaction(Db, infinity).
-
-wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) ->
-    Start = os:timestamp(),
-    case gen_server:call(Pid, compactor_pid) of
-        CPid when is_pid(CPid) ->
-            Ref = erlang:monitor(process, CPid),
-            receive
-                {'DOWN', Ref, _, _, normal} when Timeout == infinity ->
-                    wait_for_compaction(Db, Timeout);
-                {'DOWN', Ref, _, _, normal} ->
-                    Elapsed = timer:now_diff(os:timestamp(), Start) div 1000,
-                    wait_for_compaction(Db, Timeout - Elapsed);
-                {'DOWN', Ref, _, _, Reason} ->
-                    {error, Reason}
-            after Timeout ->
-                erlang:demonitor(Ref, [flush]),
-                {error, Timeout}
-            end;
-        _ ->
-            ok
-    end.
-
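-% A usage sketch for wait_for_compaction/2 (hypothetical call site): wait
-% up to 5000 ms for a running compaction; the remaining budget is
-% recomputed after each normal 'DOWN' in case the updater immediately
-% respawns a compactor.
-%
-%   case couch_db:wait_for_compaction(Db, 5000) of
-%       ok -> compaction_done;
-%       {error, _TimeoutOrReason} -> still_running_or_failed
-%   end
-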
-delete_doc(Db, Id, Revisions) ->
-    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
-    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
-    {ok, Result}.
-
-open_doc(Db, IdOrDocInfo) ->
-    open_doc(Db, IdOrDocInfo, []).
-
-open_doc(Db, Id, Options) ->
-    increment_stat(Db, [couchdb, database_reads]),
-    case open_doc_int(Db, Id, Options) of
-    {ok, #doc{deleted=true}=Doc} ->
-        case lists:member(deleted, Options) of
-        true ->
-            apply_open_options({ok, Doc},Options);
-        false ->
-            {not_found, deleted}
-        end;
-    Else ->
-        apply_open_options(Else,Options)
-    end.
-
-apply_open_options({ok, Doc},Options) ->
-    apply_open_options2(Doc,Options);
-apply_open_options(Else,_Options) ->
-    Else.
-
-apply_open_options2(Doc,[]) ->
-    {ok, Doc};
-apply_open_options2(#doc{atts=Atts0,revs=Revs}=Doc,
-        [{atts_since, PossibleAncestors}|Rest]) ->
-    RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
-    Atts = lists:map(fun(Att) ->
-        [AttPos, Data] = couch_att:fetch([revpos, data], Att),
-        if  AttPos > RevPos -> couch_att:store(data, Data, Att);
-            true -> couch_att:store(data, stub, Att)
-        end
-    end, Atts0),
-    apply_open_options2(Doc#doc{atts=Atts}, Rest);
-apply_open_options2(Doc, [ejson_body | Rest]) ->
-    apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc,[_|Rest]) ->
-    apply_open_options2(Doc,Rest).
-
-
-find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
-    0;
-find_ancestor_rev_pos(_DocRevs, []) ->
-    0;
-find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
-    case lists:member({RevPos, RevId}, AttsSinceRevs) of
-    true ->
-        RevPos;
-    false ->
-        find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
-    end.
-
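-% A worked sketch of find_ancestor_rev_pos/2 (made-up revs): with doc revs
-% {3, [R3, R2, R1]} and atts_since ancestors [{2, R2}], the walk checks
-% {3, R3} (no match) and then {2, R2} (match), returning 2; the atts_since
-% clause of apply_open_options2/2 above then turns every attachment with
-% revpos =< 2 into a stub, since the client already has those bodies.
-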
-open_doc_revs(Db, Id, Revs, Options) ->
-    increment_stat(Db, [couchdb, database_reads]),
-    [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
-    {ok, [apply_open_options(Result, Options) || Result <- Results]}.
-
-% Each returned result is a list of tuples:
-% {Id, MissingRevs, PossibleAncestors}
-% If no revs are missing for an Id, that Id is omitted from the results.
-get_missing_revs(Db, IdRevsList) ->
-    Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
-    {ok, find_missing(IdRevsList, Results)}.
-
-find_missing([], []) ->
-    [];
-find_missing([{Id, Revs}|RestIdRevs], [FullInfo | RestLookupInfo])
-        when is_record(FullInfo, full_doc_info) ->
-    case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
-    [] ->
-        find_missing(RestIdRevs, RestLookupInfo);
-    MissingRevs ->
-        #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
-        LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
-        % Find the revs that are possible parents of this rev
-        PossibleAncestors =
-        lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
-            % this leaf is a "possible ancenstor" of the missing
-            % revs if this LeafPos lessthan any of the missing revs
-            case lists:any(fun({MissingPos, _}) ->
-                    LeafPos < MissingPos end, MissingRevs) of
-            true ->
-                [{LeafPos, LeafRevId} | Acc];
-            false ->
-                Acc
-            end
-        end, [], LeafRevs),
-        [{Id, MissingRevs, PossibleAncestors} |
-                find_missing(RestIdRevs, RestLookupInfo)]
-    end;
-find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
-    [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
-
-get_doc_info(Db, Id) ->
-    case get_full_doc_info(Db, Id) of
-    #full_doc_info{} = FDI ->
-        {ok, couch_doc:to_doc_info(FDI)};
-    Else ->
-        Else
-    end.
-
-get_full_doc_info(Db, Id) ->
-    [Result] = get_full_doc_infos(Db, [Id]),
-    Result.
-
-get_full_doc_infos(Db, Ids) ->
-    couch_db_engine:open_docs(Db, Ids).
-
-purge_docs(Db, IdRevs) ->
-    purge_docs(Db, IdRevs, []).
-
--spec purge_docs(#db{}, [{UUId, Id, [Rev]}], [PurgeOption]) ->
-    {ok, [Reply]} when
-    UUId :: binary(),
-    Id :: binary() | list(),
-    Rev :: {non_neg_integer(), binary()},
-    PurgeOption :: interactive_edit | replicated_changes,
-    Reply :: {ok, []} | {ok, [Rev]}.
-purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
-    UUIDsIdsRevs2 = [{UUID, couch_util:to_binary(Id), Revs}
-        || {UUID, Id, Revs}  <- UUIDsIdsRevs],
-    % Check here if any UUIDs already exist when
-    % we're not replicating purge infos
-    IsRepl = lists:member(replicated_changes, Options),
-    if IsRepl -> ok; true ->
-        UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2],
-        lists:foreach(fun(Resp) ->
-            if Resp == not_found -> ok; true ->
-                Fmt = "Duplicate purge info UIUD: ~s",
-                Reason = io_lib:format(Fmt, [element(2, Resp)]),
-                throw({badreq, Reason})
-            end
-        end, get_purge_infos(Db, UUIDs))
-    end,
-    increment_stat(Db, [couchdb, database_purges]),
-    gen_server:call(Pid, {purge_docs, UUIDsIdsRevs2, Options}).
-
--spec get_purge_infos(#db{}, [UUId]) -> [PurgeInfo] when
-    UUId :: binary(),
-    PurgeInfo :: {PurgeSeq, UUId, Id, [Rev]} | not_found,
-    PurgeSeq :: non_neg_integer(),
-    Id :: binary(),
-    Rev :: {non_neg_integer(), binary()}.
-get_purge_infos(Db, UUIDs) ->
-    couch_db_engine:load_purge_infos(Db, UUIDs).
-
-
-get_minimum_purge_seq(#db{} = Db) ->
-    PurgeSeq = couch_db_engine:get_purge_seq(Db),
-    OldestPurgeSeq = couch_db_engine:get_oldest_purge_seq(Db),
-    PurgeInfosLimit = couch_db_engine:get_purge_infos_limit(Db),
-
-    FoldFun = fun(#doc{id = DocId, body = {Props}}, SeqAcc) ->
-        case DocId of
-            <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
-                ClientSeq = couch_util:get_value(<<"purge_seq">>, Props),
-                DbName = couch_db:name(Db),
-                % If there's a broken doc we have to keep every
-                % purge info until the doc is fixed or removed.
-                Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'",
-                case ClientSeq of
-                    CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit ->
-                        {ok, SeqAcc};
-                    CS when is_integer(CS) ->
-                        case purge_client_exists(DbName, DocId, Props) of
-                            true ->
-                                {ok, erlang:min(CS, SeqAcc)};
-                            false ->
-                                couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
-                                {ok, SeqAcc}
-                        end;
-                    _ ->
-                        couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
-                        {ok, erlang:min(OldestPurgeSeq, SeqAcc)}
-                end;
-            _ ->
-                {stop, SeqAcc}
-        end
-    end,
-    InitMinSeq = PurgeSeq - PurgeInfosLimit,
-    Opts = [
-        {start_key, list_to_binary(?LOCAL_DOC_PREFIX ++ "purge-")}
-    ],
-    {ok, MinIdxSeq} = couch_db:fold_local_docs(Db, FoldFun, InitMinSeq, Opts),
-    FinalSeq = case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
-        true -> MinIdxSeq;
-        false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
-    end,
-    % Log a warning if we've got a purge sequence exceeding the
-    % configured threshold.
-    if FinalSeq >= (PurgeSeq - PurgeInfosLimit) -> ok; true ->
-        Fmt = "The purge sequence for '~s' exceeds configured threshold",
-        couch_log:warning(Fmt, [couch_db:name(Db)])
-    end,
-    FinalSeq.
-
-
-purge_client_exists(DbName, DocId, Props) ->
-    % Warn about clients that have not updated their purge
-    % checkpoints in the last "index_lag_warn_seconds"
-    LagWindow = config:get_integer(
-            "purge", "index_lag_warn_seconds", 86400), % Default 24 hours
-
-    {Mega, Secs, _} = os:timestamp(),
-    NowSecs = Mega * 1000000 + Secs,
-    LagThreshold = NowSecs - LagWindow,
-
-    try
-        Exists = couch_db_plugin:is_valid_purge_client(DbName, Props),
-        if not Exists -> ok; true ->
-            Updated = couch_util:get_value(<<"updated_on">>, Props),
-            if is_integer(Updated) andalso Updated > LagThreshold -> ok; true ->
-                Diff = NowSecs - Updated,
-                Fmt1 = "Purge checkpoint '~s' not updated in ~p seconds "
-                    "in database ~p",
-                couch_log:error(Fmt1, [DocId, Diff, DbName])
-            end
-        end,
-        Exists
-    catch _:_ ->
-        % If we fail to check for a client we have to assume that
-        % it exists.
-        Fmt2 = "Failed to check purge checkpoint using
-            document '~p' in database ~p",
-        couch_log:error(Fmt2, [DocId, DbName]),
-        true
-    end.
-
-
-set_purge_infos_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_purge_infos_limit, Limit}, infinity);
-set_purge_infos_limit(_Db, _Limit) ->
-    throw(invalid_purge_infos_limit).
-
-
-get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
-    Fun.
-
-get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
-    Fun.
-
-get_committed_update_seq(#db{committed_update_seq=Seq}) ->
-    Seq.
-
-get_update_seq(#db{} = Db)->
-    couch_db_engine:get_update_seq(Db).
-
-get_user_ctx(#db{user_ctx = UserCtx}) ->
-    UserCtx;
-get_user_ctx(?OLD_DB_REC = Db) ->
-    ?OLD_DB_USER_CTX(Db).
-
-get_purge_seq(#db{}=Db) ->
-    couch_db_engine:get_purge_seq(Db).
-
-get_oldest_purge_seq(#db{}=Db) ->
-    couch_db_engine:get_oldest_purge_seq(Db).
-
-get_purge_infos_limit(#db{}=Db) ->
-    couch_db_engine:get_purge_infos_limit(Db).
-
-get_pid(#db{main_pid = Pid}) ->
-    Pid.
-
-get_del_doc_count(Db) ->
-    {ok, couch_db_engine:get_del_doc_count(Db)}.
-
-get_doc_count(Db) ->
-    {ok, couch_db_engine:get_doc_count(Db)}.
-
-get_uuid(#db{}=Db) ->
-    couch_db_engine:get_uuid(Db).
-
-get_epochs(#db{}=Db) ->
-    Epochs = couch_db_engine:get_epochs(Db),
-    validate_epochs(Epochs),
-    Epochs.
-
-get_filepath(#db{filepath = FilePath}) ->
-    FilePath.
-
-get_instance_start_time(#db{instance_start_time = IST}) ->
-    IST.
-
-get_compacted_seq(#db{}=Db) ->
-    couch_db_engine:get_compacted_seq(Db).
-
-get_compactor_pid(#db{compactor_pid = Pid}) ->
-    Pid.
-
-get_compactor_pid_sync(#db{main_pid=Pid}=Db) ->
-    case gen_server:call(Pid, compactor_pid, infinity) of
-        CPid when is_pid(CPid) ->
-            CPid;
-        _ ->
-            nil
-    end.
-
-get_db_info(Db) ->
-    #db{
-        name = Name,
-        compactor_pid = Compactor,
-        instance_start_time = StartTime,
-        committed_update_seq = CommittedUpdateSeq
-    } = Db,
-    {ok, DocCount} = get_doc_count(Db),
-    {ok, DelDocCount} = get_del_doc_count(Db),
-    SizeInfo = couch_db_engine:get_size_info(Db),
-    DiskVersion = couch_db_engine:get_disk_version(Db),
-    Uuid = case get_uuid(Db) of
-        undefined -> null;
-        Uuid0 -> Uuid0
-    end,
-    CompactedSeq = case get_compacted_seq(Db) of
-        undefined -> null;
-        Else1 -> Else1
-    end,
-    Props = case couch_db_engine:get_props(Db) of
-        undefined -> null;
-        Else2 -> {Else2}
-    end,
-    InfoList = [
-        {db_name, Name},
-        {engine, couch_db_engine:get_engine(Db)},
-        {doc_count, DocCount},
-        {doc_del_count, DelDocCount},
-        {update_seq, get_update_seq(Db)},
-        {purge_seq, couch_db_engine:get_purge_seq(Db)},
-        {compact_running, Compactor /= nil},
-        {sizes, {SizeInfo}},
-        {instance_start_time, StartTime},
-        {disk_format_version, DiskVersion},
-        {committed_update_seq, CommittedUpdateSeq},
-        {compacted_seq, CompactedSeq},
-        {props, Props},
-        {uuid, Uuid}
-    ],
-    {ok, InfoList}.
-
-get_partition_info(#db{} = Db, Partition) when is_binary(Partition) ->
-    Info = couch_db_engine:get_partition_info(Db, Partition),
-    {ok, Info};
-get_partition_info(_Db, _Partition) ->
-    throw({bad_request, <<"`partition` is not valid">>}).
-
-
-get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) ->
-    DDocId = couch_util:normalize_ddoc_id(DDocId0),
-    DbName = mem3:dbname(ShardDbName),
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(fabric:open_doc(DbName, DDocId, []))
-    end),
-    receive {'DOWN', Ref, _, _, Response} ->
-        Response
-    end;
-get_design_doc(#db{} = Db, DDocId0) ->
-    DDocId = couch_util:normalize_ddoc_id(DDocId0),
-    couch_db:open_doc_int(Db, DDocId, [ejson_body]).
-
-get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
-    DbName = mem3:dbname(ShardDbName),
-    {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
-    receive {'DOWN', Ref, _, _, Response} ->
-        Response
-    end;
-get_design_docs(#db{} = Db) ->
-    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
-    {ok, Docs} = fold_design_docs(Db, FoldFun, [], []),
-    {ok, lists:reverse(Docs)}.
-
-get_design_doc_count(#db{} = Db) ->
-    FoldFun = fun(_, Acc) -> {ok, Acc + 1} end,
-    fold_design_docs(Db, FoldFun, 0, []).
-
-check_is_admin(#db{user_ctx=UserCtx}=Db) ->
-    case is_admin(Db) of
-        true -> ok;
-        false ->
-            Reason = <<"You are not a db or server admin.">>,
-            throw_security_error(UserCtx, Reason)
-    end.
-
-check_is_member(#db{user_ctx=UserCtx}=Db) ->
-    case is_member(Db) of
-        true -> ok;
-        false -> throw_security_error(UserCtx)
-    end.
-
-is_admin(#db{user_ctx=UserCtx}=Db) ->
-    case couch_db_plugin:check_is_admin(Db) of
-        true -> true;
-        false ->
-            {Admins} = get_admins(Db),
-            is_authorized(UserCtx, Admins)
-    end.
-
-is_member(#db{user_ctx=UserCtx}=Db) ->
-    case is_admin(Db) of
-        true -> true;
-        false ->
-            case is_public_db(Db) of
-                true -> true;
-                false ->
-                    {Members} = get_members(Db),
-                    is_authorized(UserCtx, Members)
-            end
-    end.
-
-is_public_db(#db{}=Db) ->
-    {Members} = get_members(Db),
-    Names = couch_util:get_value(<<"names">>, Members, []),
-    Roles = couch_util:get_value(<<"roles">>, Members, []),
-    Names =:= [] andalso Roles =:= [].
-
-is_authorized(#user_ctx{name=UserName,roles=UserRoles}, Security) ->
-    Names = couch_util:get_value(<<"names">>, Security, []),
-    Roles = couch_util:get_value(<<"roles">>, Security, []),
-    case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
-        true -> true;
-        false -> check_security(names, UserName, Names)
-    end.
-
-check_security(roles, [], _) ->
-    false;
-check_security(roles, UserRoles, Roles) ->
-    UserRolesSet = ordsets:from_list(UserRoles),
-    RolesSet = ordsets:from_list(Roles),
-    not ordsets:is_disjoint(UserRolesSet, RolesSet);
-check_security(names, _, []) ->
-    false;
-check_security(names, null, _) ->
-    false;
-check_security(names, UserName, Names) ->
-    lists:member(UserName, Names).
-
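-% A quick sketch of the checks above (made-up names and roles):
-%
-%   check_security(roles, [<<"dev">>], [<<"_admin">>, <<"dev">>]) -> true
-%   check_security(names, null, [<<"alice">>])                    -> false
-%   check_security(names, <<"alice">>, [<<"alice">>])             -> true
-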
-throw_security_error(#user_ctx{name=null}=UserCtx) ->
-    Reason = <<"You are not authorized to access this db.">>,
-    throw_security_error(UserCtx, Reason);
-throw_security_error(#user_ctx{name=_}=UserCtx) ->
-    Reason = <<"You are not allowed to access this db.">>,
-    throw_security_error(UserCtx, Reason).
-throw_security_error(#user_ctx{}=UserCtx, Reason) ->
-    Error = security_error_type(UserCtx),
-    throw({Error, Reason}).
-
-security_error_type(#user_ctx{name=null}) ->
-    unauthorized;
-security_error_type(#user_ctx{name=_}) ->
-    forbidden.
-
-
-get_admins(#db{security=SecProps}) ->
-    couch_util:get_value(<<"admins">>, SecProps, {[]}).
-
-get_members(#db{security=SecProps}) ->
-    % we fallback to readers here for backwards compatibility
-    couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})).
-
-get_security(#db{security=SecProps}) ->
-    {SecProps};
-get_security(?OLD_DB_REC = Db) ->
-    {?OLD_DB_SECURITY(Db)}.
-
-set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
-    check_is_admin(Db),
-    ok = validate_security_object(NewSecProps),
-    gen_server:call(Pid, {set_security, NewSecProps}, infinity);
-set_security(_, _) ->
-    throw(bad_request).
-
-set_user_ctx(#db{} = Db, UserCtx) ->
-    {ok, Db#db{user_ctx = UserCtx}}.
-
-validate_security_object(SecProps) ->
-    Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
-    % we fallback to readers here for backwards compatibility
-    Members = couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})),
-    ok = validate_names_and_roles(Admins),
-    ok = validate_names_and_roles(Members),
-    ok.
-
-% validate user input
-validate_names_and_roles({Props}) when is_list(Props) ->
-    case couch_util:get_value(<<"names">>, Props, []) of
-    Ns when is_list(Ns) ->
-            [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
-            Ns;
-    _ ->
-        throw("names must be a JSON list of strings")
-    end,
-    case couch_util:get_value(<<"roles">>, Props, []) of
-    Rs when is_list(Rs) ->
-        [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
-        Rs;
-    _ ->
-        throw("roles must be a JSON list of strings")
-    end,
-    ok;
-validate_names_and_roles(_) ->
-    throw("admins or members must be a JSON list of strings").
-
-get_revs_limit(#db{} = Db) ->
-    couch_db_engine:get_revs_limit(Db).
-
-set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
-set_revs_limit(_Db, _Limit) ->
-    throw(invalid_revs_limit).
-
-name(#db{name=Name}) ->
-    Name;
-name(?OLD_DB_REC = Db) ->
-    ?OLD_DB_NAME(Db).
-
-
-validate_docid(#db{} = Db, DocId) when is_binary(DocId) ->
-    couch_doc:validate_docid(DocId, name(Db)),
-    case is_partitioned(Db) of
-        true ->
-            couch_partition:validate_docid(DocId);
-        false ->
-            ok
-    end.
-
-
-doc_from_json_obj_validate(#db{} = Db, DocJson) ->
-    Doc = couch_doc:from_json_obj_validate(DocJson, name(Db)),
-    {Props} = DocJson,
-    case couch_util:get_value(<<"_id">>, Props) of
-        DocId when is_binary(DocId) ->
-            % Only validate the docid if it was provided
-            validate_docid(Db, DocId);
-        _ ->
-            ok
-    end,
-    Doc.
-
-
-update_doc(Db, Doc, Options) ->
-    update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(Db, Doc, Options, UpdateType) ->
-    case update_docs(Db, [Doc], Options, UpdateType) of
-    {ok, [{ok, NewRev}]} ->
-        {ok, NewRev};
-    {ok, [{{_Id, _Rev}, Error}]} ->
-        throw(Error);
-    {ok, [Error]} ->
-        throw(Error);
-    {ok, []} ->
-        % replication success
-        {Pos, [RevId | _]} = Doc#doc.revs,
-        {ok, {Pos, RevId}}
-    end.
-
-update_docs(Db, Docs) ->
-    update_docs(Db, Docs, []).
-
-% group_alike_docs groups the sorted documents into sublist buckets, by id.
-% e.g. [DocA, DocA, DocB, DocC] -> [[DocA, DocA], [DocB], [DocC]]
-group_alike_docs(Docs) ->
-    % Here we make the sort stable by pairing each doc with its input
-    % position, so duplicate docids keep their order; lists:sort/2 isn't
-    % documented anywhere as being stable.
-    WithPos = lists:zip(Docs, lists:seq(1, length(Docs))),
-    SortFun = fun({D1, P1}, {D2, P2}) -> {D1#doc.id, P1} =< {D2#doc.id, P2} end,
-    SortedDocs = [D || {D, _} <- lists:sort(SortFun, WithPos)],
-    group_alike_docs(SortedDocs, []).
-
-group_alike_docs([], Buckets) ->
-    lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc|Rest], []) ->
-    group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
-    [#doc{id=BucketId}|_] = Bucket,
-    case Doc#doc.id == BucketId of
-    true ->
-        % add to existing bucket
-        group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
-    false ->
-        % add to new bucket
-        group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
-    end.
-
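-% A minimal sketch (hypothetical docs):
-%
-%   group_alike_docs([#doc{id = <<"a">>}, #doc{id = <<"b">>},
-%                     #doc{id = <<"a">>}])
-%   %=> [[#doc{id = <<"a">>}, #doc{id = <<"a">>}], [#doc{id = <<"b">>}]]
-%
-% where the two <<"a">> docs keep their input order within their bucket.
-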
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) ->
-    case catch check_is_admin(Db) of
-        ok -> validate_ddoc(Db, Doc);
-        Error -> Error
-    end;
-validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) ->
-    ValidationFuns = load_validation_funs(Db),
-    validate_doc_update(Db#db{validate_doc_funs=ValidationFuns}, Doc, Fun);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(Db, Doc, GetDiskDocFun) ->
-    case get(io_priority) of
-        {internal_repl, _} ->
-            ok;
-        _ ->
-            validate_doc_update_int(Db, Doc, GetDiskDocFun)
-    end.
-
-validate_ddoc(Db, DDoc) ->
-    try
-        ok = couch_mrview:validate(Db, couch_doc:with_ejson_body(DDoc))
-    catch
-        throw:{invalid_design_doc, Reason} ->
-            {bad_request, invalid_design_doc, Reason};
-        throw:{compilation_error, Reason} ->
-            {bad_request, compilation_error, Reason};
-        throw:Error ->
-            Error
-    end.
-
-validate_doc_update_int(Db, Doc, GetDiskDocFun) ->
-    Fun = fun() ->
-        DiskDoc = GetDiskDocFun(),
-        JsonCtx = couch_util:json_user_ctx(Db),
-        SecObj = get_security(Db),
-        try
-            [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
-                ok -> ok;
-                Error -> throw(Error)
-             end || Fun <- Db#db.validate_doc_funs],
-            ok
-        catch
-            throw:Error ->
-                Error
-        end
-    end,
-    couch_stats:update_histogram([couchdb, query_server, vdu_process_time],
-                                 Fun).
-
-
-% to be safe, spawn a middleman here
-load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, Funs}} ->
-            gen_server:cast(Pid, {load_validation_funs, Funs}),
-            Funs;
-        {'DOWN', Ref, _, _, {database_does_not_exist, _StackTrace}} ->
-            ok = couch_server:close_db_if_idle(Db#db.name),
-            erlang:error(database_does_not_exist);
-        {'DOWN', Ref, _, _, Reason} ->
-            couch_log:error("could not load validation funs ~p", [Reason]),
-            throw(internal_server_error)
-    end;
-load_validation_funs(#db{main_pid=Pid}=Db) ->
-    {ok, DDocInfos} = get_design_docs(Db),
-    OpenDocs = fun
-        (#full_doc_info{}=D) ->
-            {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
-            Doc
-    end,
-    DDocs = lists:map(OpenDocs, DDocInfos),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    gen_server:cast(Pid, {load_validation_funs, Funs}),
-    Funs.
-
-reload_validation_funs(#db{} = Db) ->
-    gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}).
-
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
-        OldFullDocInfo, LeafRevsDict, AllowConflict) ->
-    case Revs of
-    [PrevRev|_] ->
-        case dict:find({RevStart, PrevRev}, LeafRevsDict) of
-        {ok, {#leaf{deleted=Deleted, ptr=DiskSp}, DiskRevs}} ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
-                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
-            false ->
-                LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
-                {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
-            end;
-        error when AllowConflict ->
-            % merge_stubs will generate an error if there are stubs
-            couch_doc:merge_stubs(Doc, #doc{}),
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        error ->
-            {conflict, Doc}
-        end;
-    [] ->
-        % new doc, and we have existing revs.
-        % reuse existing deleted doc
-        if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        true ->
-            {conflict, Doc}
-        end
-    end.
-
-
-
-prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
-        AccFatalErrors) ->
-    AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
-    {AccPrepped2, AccFatalErrors};
-prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    % no existing revs are known,
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-            false -> ok
-            end,
-            case Revs of
-            {0, []} ->
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[Doc | AccBucket], AccErrors2};
-                Error ->
-                    {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]}
-                end;
-            _ ->
-                % old revs specified but none exist, a conflict
-                {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3);
-prep_and_validate_updates(Db, [DocBucket|RestBuckets],
-        [#full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    Leafs = couch_key_tree:get_all_leafs(OldRevTree),
-    LeafRevsDict = dict:from_list([
-        {{Start, RevId}, {Leaf, Revs}} ||
-        {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
-    ]),
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun(Doc, {Docs2Acc, AccErrors2}) ->
-            case prep_and_validate_update(Db, Doc, OldFullDocInfo,
-                    LeafRevsDict, AllowConflict) of
-            {ok, Doc2} ->
-                {[Doc2 | Docs2Acc], AccErrors2};
-            {Error, _} ->
-                % Record the error
-                {Docs2Acc, [{doc_tag(Doc), Error} |AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3).
-
-
-update_docs(Db, Docs, Options) ->
-    update_docs(Db, Docs, Options, interactive_edit).
-
-
-prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
-    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
-            {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
-    AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
-    {AccPrepped2, lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
-    case OldInfo of
-    not_found ->
-        {ValidatedBucket, AccErrors3} = lists:foldl(
-            fun(Doc, {AccPrepped2, AccErrors2}) ->
-                case couch_doc:has_stubs(Doc) of
-                true ->
-                    couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-                false -> ok
-                end,
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[Doc | AccPrepped2], AccErrors2};
-                Error ->
-                    {AccPrepped2, [{Doc, Error} | AccErrors2]}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
-    #full_doc_info{rev_tree=OldTree} ->
-        OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
-        OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
-        NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
-        NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
-        Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
-        LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
-        {ValidatedBucket, AccErrors3} =
-        lists:foldl(
-            fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
-                IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
-                case dict:find({Pos, RevId}, LeafRevsFullDict) of
-                {ok, {Start, Path}} when not IsOldLeaf ->
-                    % our unflushed doc is a leaf node. Go back on the path
-                    % to find the previous rev that's on disk.
-
-                    LoadPrevRevFun = fun() ->
-                                make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
-                            end,
-
-                    case couch_doc:has_stubs(Doc) of
-                    true ->
-                        DiskDoc = case LoadPrevRevFun() of
-                            #doc{} = DiskDoc0 ->
-                                DiskDoc0;
-                            _ ->
-                                % Force a missing_stub exception
-                                couch_doc:merge_stubs(Doc, #doc{})
-                        end,
-                        Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                        GetDiskDocFun = fun() -> DiskDoc end;
-                    false ->
-                        Doc2 = Doc,
-                        GetDiskDocFun = LoadPrevRevFun
-                    end,
-
-                    case validate_doc_update(Db, Doc2, GetDiskDocFun) of
-                    ok ->
-                        {[Doc2 | AccValidated], AccErrors2};
-                    Error ->
-                        {AccValidated, [{Doc, Error} | AccErrors2]}
-                    end;
-                _ ->
-                    % this doc isn't a leaf or already exists in the tree.
-                    % ignore but consider it a success.
-                    {AccValidated, AccErrors2}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
-                [ValidatedBucket | AccPrepped], AccErrors3)
-    end.
-
-
-
-new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) ->
-    DigestedAtts = lists:foldl(fun(Att, Acc) ->
-        [N, T, M] = couch_att:fetch([name, type, md5], Att),
-        case M == <<>> of
-            true -> Acc;
-            false -> [{N, T, M} | Acc]
-        end
-    end, [], Atts),
-    case DigestedAtts of
-        Atts2 when length(Atts) =/= length(Atts2) ->
-            % We must have old style non-md5 attachments
-            ?l2b(integer_to_list(couch_util:rand32()));
-        Atts2 ->
-            OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
-            couch_hash:md5_hash(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]))
-    end.
-
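-% A hedged sketch of the determinism new_revid/1 provides: docs that agree
-% on the deleted flag, parent rev, body and md5-digested attachments get
-% identical rev ids, which is what lets independent writers converge.
-%
-%   Doc = #doc{id = <<"a">>, revs = {0, []}, body = {[]}},
-%   true = (new_revid(Doc) =:= new_revid(Doc)).
-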
-new_revs([], OutBuckets, IdRevsAcc) ->
-    {lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
-    {NewBucket, IdRevsAcc3} = lists:mapfoldl(
-        fun(#doc{revs={Start, RevIds}}=Doc, IdRevsAcc2)->
-        NewRevId = new_revid(Doc),
-        {Doc#doc{revs={Start+1, [NewRevId | RevIds]}},
-            [{doc_tag(Doc), {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
-    end, IdRevsAcc, Bucket),
-    new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts=Atts}=Doc) ->
-    lists:foldl(fun(Att, Names) ->
-        Name = couch_att:fetch(name, Att),
-        case ordsets:is_element(Name, Names) of
-            true -> throw({bad_request, <<"Duplicate attachments">>});
-            false -> ordsets:add_element(Name, Names)
-        end
-    end, ordsets:new(), Atts),
-    Doc.
-
-tag_docs([]) ->
-    [];
-tag_docs([#doc{meta=Meta}=Doc | Rest]) ->
-    [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-
-doc_tag(#doc{meta=Meta}) ->
-    case lists:keyfind(ref, 1, Meta) of
-        {ref, Ref} when is_reference(Ref) -> Ref;
-        false -> throw(doc_not_tagged);
-        Else -> throw({invalid_doc_tag, Else})
-    end.
-
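-% Round-trip sketch for the helpers above (illustrative): tag_docs/1
-% stamps each doc's meta with a unique ref and doc_tag/1 reads it back;
-% this is how update results are matched to their input docs even when
-% ids are duplicated.
-%
-%   [Tagged] = tag_docs([#doc{id = <<"a">>}]),
-%   true = is_reference(doc_tag(Tagged)).
-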
-update_docs(Db, Docs0, Options, replicated_changes) ->
-    Docs = tag_docs(Docs0),
-
-    PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
-        prep_and_validate_replicated_updates(Db0, DocBuckets0,
-            ExistingDocInfos, [], [])
-    end,
-
-    {ok, DocBuckets, NonRepDocs, DocErrors}
-        = before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
-
-    DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc))
-            || Doc <- Bucket] || Bucket <- DocBuckets],
-    {ok, _} = write_and_commit(Db, DocBuckets2,
-        NonRepDocs, [merge_conflicts | Options]),
-    {ok, DocErrors};
-
-update_docs(Db, Docs0, Options, interactive_edit) ->
-    Docs = tag_docs(Docs0),
-
-    AllOrNothing = lists:member(all_or_nothing, Options),
-    PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
-        prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos,
-            AllOrNothing, [], [])
-    end,
-
-    {ok, DocBuckets, NonRepDocs, DocErrors}
-        = before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
-
-    if (AllOrNothing) and (DocErrors /= []) ->
-        RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
-        {aborted, lists:map(fun({Ref, Error}) ->
-            #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict),
-            case {Start, RevIds} of
-                {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
-                {0, []} -> {{Id, {0, <<>>}}, Error}
-            end
-        end, DocErrors)};
-    true ->
-        Options2 = if AllOrNothing -> [merge_conflicts];
-                true -> [] end ++ Options,
-        DocBuckets2 = [[
-                doc_flush_atts(Db, set_new_att_revpos(
-                        check_dup_atts(Doc)))
-                || Doc <- B] || B <- DocBuckets],
-        {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
-
-        {ok, CommitResults} = write_and_commit(Db, DocBuckets3,
-            NonRepDocs, Options2),
-
-        ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
-            dict:store(Key, Resp, ResultsAcc)
-        end, dict:from_list(IdRevs), CommitResults ++ DocErrors),
-        {ok, lists:map(fun(Doc) ->
-            dict:fetch(doc_tag(Doc), ResultsDict)
-        end, Docs)}
-    end.
-
-% Returns the first available document on disk. Input list is a full rev path
-% for the doc.
-make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
-    nil;
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted=IsDel, ptr=Sp}} |_]=DocPath) ->
-    Revs = [Rev || {Rev, _} <- DocPath],
-    make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
-
-collect_results_with_metrics(Pid, MRef, []) ->
-    Begin = os:timestamp(),
-    try
-        collect_results(Pid, MRef, [])
-    after
-        ResultsTime = timer:now_diff(os:timestamp(), Begin) div 1000,
-        couch_stats:update_histogram(
-            [couchdb, collect_results_time],
-            ResultsTime
-        )
-    end.
-
-collect_results(Pid, MRef, ResultsAcc) ->
-    receive
-    {result, Pid, Result} ->
-        collect_results(Pid, MRef, [Result | ResultsAcc]);
-    {done, Pid} ->
-        {ok, ResultsAcc};
-    {retry, Pid} ->
-        retry;
-    {'DOWN', MRef, _, _, Reason} ->
-        exit(Reason)
-    end.
-
-write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
-        NonRepDocs, Options) ->
-    DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
-    MergeConflicts = lists:member(merge_conflicts, Options),
-    MRef = erlang:monitor(process, Pid),
-    try
-        Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts},
-        case collect_results_with_metrics(Pid, MRef, []) of
-        {ok, Results} -> {ok, Results};
-        retry ->
-            % This can happen if the db file we wrote to was swapped out by
-            % compaction. Retry by reopening the db and writing to the current file
-            {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
-            DocBuckets2 = [
-                [doc_flush_atts(Db2, Doc) || Doc <- Bucket] ||
-                Bucket <- DocBuckets1
-            ],
-            % We only retry once
-            DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
-            close(Db2),
-            Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts},
-            case collect_results_with_metrics(Pid, MRef, []) of
-            {ok, Results} -> {ok, Results};
-            retry -> throw({update_error, compaction_retry})
-            end
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-
-prepare_doc_summaries(Db, BucketList) ->
-    [lists:map(
-        fun(#doc{body = Body, atts = Atts} = Doc0) ->
-            DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
-            {ok, SizeInfo} = couch_att:size_info(Atts),
-            AttsStream = case Atts of
-                [Att | _] ->
-                    {stream, StreamEngine} = couch_att:fetch(data, Att),
-                    StreamEngine;
-                [] ->
-                    nil
-            end,
-            Doc1 = Doc0#doc{
-                atts = DiskAtts,
-                meta = [
-                    {size_info, SizeInfo},
-                    {atts_stream, AttsStream},
-                    {ejson_size, couch_ejson_size:encoded_size(Body)}
-                ] ++ Doc0#doc.meta
-            },
-            couch_db_engine:serialize_doc(Db, Doc1)
-        end,
-        Bucket) || Bucket <- BucketList].
-
-
-before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) ->
-    increment_stat(Db, [couchdb, database_writes]),
-
-    % Separate _local docs from normal docs
-    IsLocal = fun
-        (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
-        (_) -> false
-    end,
-    {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
-
-    BucketList = group_alike_docs(Docs2),
-
-    DocBuckets = lists:map(fun(Bucket) ->
-        lists:map(fun(Doc) ->
-            DocWithBody = couch_doc:with_ejson_body(Doc),
-            couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
-        end, Bucket)
-    end, BucketList),
-
-    ValidatePred = fun
-        (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
-        (#doc{atts = Atts}) -> Atts /= []
-    end,
-
-    case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of
-        true ->
-            % look up each doc by id and get its most recent full doc info
-            Ids = [Id || [#doc{id = Id} | _] <- DocBuckets],
-            ExistingDocs = get_full_doc_infos(Db, Ids),
-            {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs),
-            % remove empty buckets
-            DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []],
-            {ok, DocBuckets3, NonRepDocs, DocErrors};
-        false ->
-            {ok, DocBuckets, NonRepDocs, []}
-    end.
-
-
-set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
-    Atts = lists:map(
-        fun(Att) ->
-            case couch_att:fetch(data, Att) of
-                % already committed to disk, don't set new rev
-                {stream, _} -> Att;
-                {Fd, _} when is_pid(Fd) -> Att;
-                % write required so update RevPos
-                _ -> couch_att:store(revpos, RevPos+1, Att)
-            end
-        end, Atts0),
-    Doc#doc{atts = Atts}.
-
-
-doc_flush_atts(Db, Doc) ->
-    Doc#doc{atts=[couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
-
-
-compressible_att_type(MimeType) when is_binary(MimeType) ->
-    compressible_att_type(?b2l(MimeType));
-compressible_att_type(MimeType) ->
-    TypeExpList = re:split(
-        config:get("attachments", "compressible_types", ""),
-        "\\s*,\\s*",
-        [{return, list}]
-    ),
-    lists:any(
-        fun(TypeExp) ->
-            Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
-                "(?:\\s*;.*?)?\\s*", $$],
-            re:run(MimeType, Regexp, [caseless]) =/= nomatch
-        end,
-        [T || T <- TypeExpList, T /= []]
-    ).
-
-% From RFC 2616 3.6.1 - Chunked Transfer Coding
-%
-%   In other words, the origin server is willing to accept
-%   the possibility that the trailer fields might be silently
-%   discarded along the path to the client.
-%
-% I take this to mean that if "Trailers: Content-MD5\r\n"
-% is present in the request, but there is no Content-MD5
-% trailer, we're free to ignore this inconsistency and
-% pretend that no Content-MD5 exists.
-with_stream(Db, Att, Fun) ->
-    [InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
-    BufferSize = list_to_integer(
-        config:get("couchdb", "attachment_stream_buffer_size", "4096")),
-    Options = case (Enc =:= identity) andalso compressible_att_type(Type) of
-        true ->
-            CompLevel = list_to_integer(
-                config:get("attachments", "compression_level", "0")
-            ),
-            [
-                {buffer_size, BufferSize},
-                {encoding, gzip},
-                {compression_level, CompLevel}
-            ];
-        _ ->
-            [{buffer_size, BufferSize}]
-    end,
-    {ok, OutputStream} = open_write_stream(Db, Options),
-    ReqMd5 = case Fun(OutputStream) of
-        {md5, FooterMd5} ->
-            case InMd5 of
-                md5_in_footer -> FooterMd5;
-                _ -> InMd5
-            end;
-        _ ->
-            InMd5
-    end,
-    {StreamEngine, Len, IdentityLen, Md5, IdentityMd5} =
-        couch_stream:close(OutputStream),
-    couch_util:check_md5(IdentityMd5, ReqMd5),
-    {AttLen, DiskLen, NewEnc} = case Enc of
-    identity ->
-        case {Md5, IdentityMd5} of
-        {Same, Same} ->
-            {Len, IdentityLen, identity};
-        _ ->
-            {Len, IdentityLen, gzip}
-        end;
-    gzip ->
-        case couch_att:fetch([att_len, disk_len], Att) of
-            [AL, DL] when AL =:= undefined orelse DL =:= undefined ->
-                % Compressed attachment uploaded through the standalone API.
-                {Len, Len, gzip};
-            [AL, DL] ->
-                % This case is used for efficient push-replication, where a
-                % compressed attachment is located in the body of multipart
-                % content-type request.
-                {AL, DL, gzip}
-        end
-    end,
-    couch_att:store([
-        {data, {stream, StreamEngine}},
-        {att_len, AttLen},
-        {disk_len, DiskLen},
-        {md5, Md5},
-        {encoding, NewEnc}
-    ], Att).
-
-
-open_write_stream(Db, Options) ->
-    couch_db_engine:open_write_stream(Db, Options).
-
-
-open_read_stream(Db, AttState) ->
-    couch_db_engine:open_read_stream(Db, AttState).
-
-
-is_active_stream(Db, StreamEngine) ->
-    couch_db_engine:is_active_stream(Db, StreamEngine).
-
-
-calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
-    Seq;
-calculate_start_seq(Db, Node, {Seq, Uuid}) ->
-    % Treat the current node as the epoch node
-    calculate_start_seq(Db, Node, {Seq, Uuid, Node});
-calculate_start_seq(Db, _Node, {Seq, {split, Uuid}, EpochNode}) ->
-    case is_owner(EpochNode, Seq, get_epochs(Db)) of
-        true ->
-            % Find last replicated sequence from split source to target
-            mem3_rep:find_split_target_seq(Db, EpochNode, Uuid, Seq);
-        false ->
-            couch_log:warning("~p calculate_start_seq not owner "
-                "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
-                [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]),
-            0
-    end;
-calculate_start_seq(Db, _Node, {Seq, Uuid, EpochNode}) ->
-    case is_prefix(Uuid, get_uuid(Db)) of
-        true ->
-            case is_owner(EpochNode, Seq, get_epochs(Db)) of
-                true -> Seq;
-                false ->
-                    couch_log:warning("~p calculate_start_seq not owner "
-                        "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
-                        [?MODULE, Db#db.name, Seq, Uuid, EpochNode,
-                            get_epochs(Db)]),
-                    0
-            end;
-        false ->
-            couch_log:warning("~p calculate_start_seq uuid prefix mismatch "
-                "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
-                [?MODULE, Db#db.name, Seq, Uuid, EpochNode]),
-            %% The file was rebuilt, most likely in a different
-            %% order, so rewind.
-            0
-    end;
-calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
-    case is_prefix(Uuid, couch_db:get_uuid(Db)) of
-        true ->
-            try
-                start_seq(get_epochs(Db), OriginalNode, Seq)
-            catch throw:epoch_mismatch ->
-                couch_log:warning("~p start_seq duplicate uuid on node: ~p "
-                    "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
-                    [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]),
-                0
-            end;
-        false ->
-            {replace, OriginalNode, Uuid, Seq}
-    end.
-
-
-validate_epochs(Epochs) ->
-    %% Assert uniqueness.
-    case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
-        true  -> ok;
-        false -> erlang:error(duplicate_epoch)
-    end,
-    %% Assert order.
-    case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
-        true  -> ok;
-        false -> erlang:error(epoch_order)
-    end.
-
-
-is_prefix(Pattern, Subject) ->
-     binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
-
-
-is_owner(Node, Seq, Epochs) ->
-    Node =:= owner_of(Epochs, Seq).
-
-
-owner_of(Db, Seq) when not is_list(Db) ->
-    owner_of(get_epochs(Db), Seq);
-owner_of([], _Seq) ->
-    undefined;
-owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq ->
-    EpochNode;
-owner_of([_ | Rest], Seq) ->
-    owner_of(Rest, Seq).
-
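-% A worked example for owner_of/2 (made-up epochs, newest first): with
-% Epochs = [{node2, 100}, {node1, 0}],
-%
-%   owner_of(Epochs, 150) -> node2      % 150 > 100
-%   owner_of(Epochs, 100) -> node1      % not > 100, but > 0
-%   owner_of(Epochs, 0)   -> undefined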
-
-start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq ->
-    %% OrigNode is the owner of the Seq so we can safely stream from there
-    Seq;
-start_seq([{_, NewSeq}, {OrigNode, _} | _], OrigNode, Seq) when Seq > NewSeq ->
-    %% We transferred this file before Seq was written on OrigNode, so we need
-    %% to stream from the beginning of the next epoch. Note that it is _not_
-    %% necessary for the current node to own the epoch beginning at NewSeq
-    NewSeq;
-start_seq([_ | Rest], OrigNode, Seq) ->
-    start_seq(Rest, OrigNode, Seq);
-start_seq([], _OrigNode, _Seq) ->
-    throw(epoch_mismatch).
-
-
-fold_docs(Db, UserFun, UserAcc) ->
-    fold_docs(Db, UserFun, UserAcc, []).
-
-fold_docs(Db, UserFun, UserAcc, Options) ->
-    couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_local_docs(Db, UserFun, UserAcc, Options) ->
-    couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_design_docs(Db, UserFun, UserAcc, Options1) ->
-    Options2 = set_design_doc_keys(Options1),
-    couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc) ->
-    fold_changes(Db, StartSeq, UserFun, UserAcc, []).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
-    couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts).
-
-
-fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) ->
-    fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []).
-
-
-fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts) ->
-    couch_db_engine:fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts).
-
-
-count_changes_since(Db, SinceSeq) ->
-    couch_db_engine:count_changes_since(Db, SinceSeq).
-
-
-%%% Internal functions %%%
-open_doc_revs_int(Db, IdRevs, Options) ->
-    Ids = [Id || {Id, _Revs} <- IdRevs],
-    LookupResults = get_full_doc_infos(Db, Ids),
-    lists:zipwith(
-        fun({Id, Revs}, Lookup) ->
-            case Lookup of
-            #full_doc_info{rev_tree=RevTree} ->
-                {FoundRevs, MissingRevs} =
-                case Revs of
-                all ->
-                    {couch_key_tree:get_all_leafs(RevTree), []};
-                _ ->
-                    case lists:member(latest, Options) of
-                    true ->
-                        couch_key_tree:get_key_leafs(RevTree, Revs);
-                    false ->
-                        couch_key_tree:get(RevTree, Revs)
-                    end
-                end,
-                FoundResults =
-                lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
-                    case Value of
-                    ?REV_MISSING ->
-                        % we have the rev in our list but know nothing about it
-                        {{not_found, missing}, {Pos, Rev}};
-                    #leaf{deleted=IsDeleted, ptr=SummaryPtr} ->
-                        {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
-                    end
-                end, FoundRevs),
-                Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
-                {ok, Results};
-            not_found when Revs == all ->
-                {ok, []};
-            not_found ->
-                {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
-            end
-        end,
-        IdRevs, LookupResults).
-
-open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
-    case couch_db_engine:open_local_docs(Db, [Id]) of
-    [#doc{} = Doc] ->
-        apply_open_options({ok, Doc}, Options);
-    [not_found] ->
-        {not_found, missing}
-    end;
-open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
-    #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
-    Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
-    apply_open_options(
-       {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
-open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
-    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
-        DocInfo = couch_doc:to_doc_info(FullDocInfo),
-    {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
-    Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
-    apply_open_options(
-        {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
-open_doc_int(Db, Id, Options) ->
-    case get_full_doc_info(Db, Id) of
-    #full_doc_info{} = FullDocInfo ->
-        open_doc_int(Db, FullDocInfo, Options);
-    not_found ->
-        {not_found, missing}
-    end.
-
-doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
-    case lists:member(revs_info, Options) of
-    false -> [];
-    true ->
-        {[{Pos, RevPath}],[]} =
-            couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
-        [{revs_info, Pos, lists:map(
-            fun({Rev1, ?REV_MISSING}) ->
-                {Rev1, missing};
-            ({Rev1, Leaf}) ->
-                case Leaf#leaf.deleted of
-                true ->
-                    {Rev1, deleted};
-                false ->
-                    {Rev1, available}
-                end
-            end, RevPath)}]
-    end ++
-    case lists:member(conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
-        [] -> [];
-        ConflictRevs -> [{conflicts, ConflictRevs}]
-        end
-    end ++
-    case lists:member(deleted_conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
-        [] -> [];
-        DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
-        end
-    end ++
-    case lists:member(local_seq, Options) of
-    false -> [];
-    true -> [{local_seq, Seq}]
-    end.
-
-
-make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
-    #doc{
-        id = Id,
-        revs = RevisionPath,
-        body = [],
-        atts = [],
-        deleted = Deleted
-    };
-make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) ->
-    RevsLimit = get_revs_limit(Db),
-    Doc0 = couch_db_engine:read_doc_body(Db, #doc{
-        id = Id,
-        revs = {Pos, lists:sublist(Revs, 1, RevsLimit)},
-        body = Bp,
-        deleted = Deleted
-    }),
-    Doc1 = case Doc0#doc.atts of
-        BinAtts when is_binary(BinAtts) ->
-            Doc0#doc{
-                atts = couch_compress:decompress(BinAtts)
-            };
-        ListAtts when is_list(ListAtts) ->
-            Doc0
-    end,
-    after_doc_read(Db, Doc1#doc{
-        atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts]
-    }).
-
-
-after_doc_read(#db{} = Db, Doc) ->
-    DocWithBody = couch_doc:with_ejson_body(Doc),
-    couch_db_plugin:after_doc_read(Db, DocWithBody).
-
-increment_stat(#db{options = Options}, Stat) ->
-    case lists:member(sys_db, Options) of
-    true ->
-        ok;
-    false ->
-        couch_stats:increment_counter(Stat)
-    end.
-
--spec normalize_dbname(list() | binary()) -> binary().
-
-normalize_dbname(DbName) when is_list(DbName) ->
-    normalize_dbname(list_to_binary(DbName));
-normalize_dbname(DbName) when is_binary(DbName) ->
-    mem3:dbname(couch_util:drop_dot_couch_ext(DbName)).
-
-
--spec dbname_suffix(list() | binary()) -> binary().
-
-dbname_suffix(DbName) ->
-    filename:basename(normalize_dbname(DbName)).
-
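-% Hedged examples for the helpers above, assuming mem3:dbname/1 strips the
-% shard prefix and timestamp as usual:
-%
-%   normalize_dbname("shards/00000000-1fffffff/foo.1618605552.couch")
-%   %=> <<"foo">>
-%   dbname_suffix(<<"dir/foo">>) %=> <<"foo">>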
-
-validate_dbname(DbName) when is_list(DbName) ->
-    validate_dbname(?l2b(DbName));
-validate_dbname(DbName) when is_binary(DbName) ->
-    Normalized = normalize_dbname(DbName),
-    couch_db_plugin:validate_dbname(
-        DbName, Normalized, fun validate_dbname_int/2).
-
-validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
-    DbNoExt = couch_util:drop_dot_couch_ext(DbName),
-    case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
-        match ->
-            ok;
-        nomatch ->
-            case is_system_db_name(Normalized) of
-                true -> ok;
-                false -> {error, {illegal_database_name, DbName}}
-            end
-    end.
-
-is_system_db_name(DbName) when is_list(DbName) ->
-    is_system_db_name(?l2b(DbName));
-is_system_db_name(DbName) when is_binary(DbName) ->
-    Normalized = normalize_dbname(DbName),
-    Suffix = filename:basename(Normalized),
-    case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
-        {<<".">>, Result} -> Result;
-        {_Prefix, false} -> false;
-        {Prefix, true} ->
-            ReOpts =  [{capture,none}, dollar_endonly],
-            re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
-    end.
-
-set_design_doc_keys(Options1) ->
-    Dir = case lists:keyfind(dir, 1, Options1) of
-        {dir, D0} -> D0;
-        _ -> fwd
-    end,
-    Options2 = set_design_doc_start_key(Options1, Dir),
-    set_design_doc_end_key(Options2, Dir).
-
-
--define(FIRST_DDOC_KEY, <<"_design/">>).
--define(LAST_DDOC_KEY, <<"_design0">>).
-
-
-set_design_doc_start_key(Options, fwd) ->
-    Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
-    Key2 = case Key1 < ?FIRST_DDOC_KEY of
-        true -> ?FIRST_DDOC_KEY;
-        false -> Key1
-    end,
-    lists:keystore(start_key, 1, Options, {start_key, Key2});
-set_design_doc_start_key(Options, rev) ->
-    Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
-    Key2 = case Key1 > ?LAST_DDOC_KEY of
-        true -> ?LAST_DDOC_KEY;
-        false -> Key1
-    end,
-    lists:keystore(start_key, 1, Options, {start_key, Key2}).
-
-
-set_design_doc_end_key(Options, fwd) ->
-    case couch_util:get_value(end_key_gt, Options) of
-        undefined ->
-            Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
-            Key2 = case Key1 > ?LAST_DDOC_KEY of
-                true -> ?LAST_DDOC_KEY;
-                false -> Key1
-            end,
-            lists:keystore(end_key, 1, Options, {end_key, Key2});
-        EKeyGT ->
-            Key2 = case EKeyGT > ?LAST_DDOC_KEY of
-                true -> ?LAST_DDOC_KEY;
-                false -> EKeyGT
-            end,
-            lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
-    end;
-set_design_doc_end_key(Options, rev) ->
-    case couch_util:get_value(end_key_gt, Options) of
-        undefined ->
-            Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
-            Key2 = case Key1 < ?FIRST_DDOC_KEY of
-                true -> ?FIRST_DDOC_KEY;
-                false -> Key1
-            end,
-            lists:keystore(end_key, 1, Options, {end_key, Key2});
-        EKeyGT ->
-            Key2 = case EKeyGT < ?FIRST_DDOC_KEY of
-                true -> ?FIRST_DDOC_KEY;
-                false -> EKeyGT
-            end,
-            lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
-    end.
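-
-% For illustration, with no user-supplied keys the defaults clamp the
-% range to exactly the design doc id space (a hypothetical call, shown
-% only to document the clamping behavior of the two helpers above):
-%
-%     set_design_doc_keys([]) =:=
-%         [{start_key, <<"_design/">>}, {end_key, <<"_design0">>}]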
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
-    ok = meck:new(couch_epi, [passthrough]),
-    ok = meck:expect(couch_epi, decide, fun(_, _, _, _, _) -> no_decision end),
-    ok.
-
-teardown_all(_) ->
-    meck:unload().
-
-setup() ->
-    meck:reset([couch_epi]).
-
-teardown(_) ->
-    ok.
-
-validate_dbname_success_test_() ->
-    Cases =
-        generate_cases_with_shards("long/co$mplex-/path+/something")
-        ++ generate_cases_with_shards("something")
-        ++ lists:append(
-            [generate_cases_with_shards(?b2l(SystemDb))
-                || SystemDb <- ?SYSTEM_DATABASES]),
-    {
-        setup,
-        fun setup_all/0,
-        fun teardown_all/1,
-        {
-            foreach,
-            fun setup/0,
-            fun teardown/1,
-            [should_pass_validate_dbname(A) || {_, A} <- Cases]
-        }
-    }.
-
-validate_dbname_fail_test_() ->
-    Cases = generate_cases("_long/co$mplex-/path+/_something")
-       ++ generate_cases("_something")
-       ++ generate_cases_with_shards("long/co$mplex-/path+/_something#")
-       ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing")
-       ++ generate_cases("!abcdefg/werwej/_users")
-       ++ generate_cases_with_shards("!abcdefg/werwej/_users"),
-    {
-        setup,
-        fun setup_all/0,
-        fun teardown_all/1,
-        {
-            foreach,
-            fun setup/0,
-            fun teardown/1,
-            [should_fail_validate_dbname(A) || {_, A} <- Cases]
-        }
-    }.
-
-normalize_dbname_test_() ->
-    Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
-       ++ generate_cases_with_shards("_something"),
-    WithExpected = [{?l2b(filename:rootname(A)), B} || {A, B} <- Cases],
-    [{test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))}
-        || {Expected, Db} <- WithExpected].
-
-dbname_suffix_test_() ->
-    Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
-       ++ generate_cases_with_shards("_something"),
-    WithExpected = [{?l2b(filename:basename(Arg)), Db} || {Arg, Db} <- Cases],
-    [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
-        || {Expected, Db} <- WithExpected].
-
-is_system_db_name_test_() ->
-    Cases = lists:append([
-        generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
-            || Db <- ?SYSTEM_DATABASES]
-        ++ [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES
-    ]),
-    WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db}
-        || {Arg, Db} <- Cases],
-    [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES",
-        ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected].
-
-should_pass_validate_dbname(DbName) ->
-    {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}.
-
-should_fail_validate_dbname(DbName) ->
-    {test_name(DbName), ?_test(begin
-        Result = validate_dbname(DbName),
-        ?assertMatch({error, {illegal_database_name, _}}, Result),
-        {error, {illegal_database_name, FailedDbName}} = Result,
-        ?assertEqual(to_binary(DbName), FailedDbName),
-        ok
-    end)}.
-
-calculate_start_seq_test_() ->
-    {
-        setup,
-        fun setup_start_seq_all/0,
-        fun teardown_start_seq_all/1,
-        {
-            foreach,
-            fun setup_start_seq/0,
-            fun teardown_start_seq/1,
-            [
-                t_calculate_start_seq_uuid_mismatch(),
-                t_calculate_start_seq_is_owner(),
-                t_calculate_start_seq_not_owner(),
-                t_calculate_start_seq_raw(),
-                t_calculate_start_seq_epoch_mismatch()
-            ]
-        }
-    }.
-
-setup_start_seq_all() ->
-    meck:new(couch_db_engine, [passthrough]),
-    meck:expect(couch_db_engine, get_uuid, fun(_) -> <<"foo">> end),
-    ok = meck:expect(couch_log, warning, 2, ok),
-    Epochs = [
-        {node2, 10},
-        {node1, 1}
-    ],
-    meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
-
-teardown_start_seq_all(_) ->
-    meck:unload().
-
-setup_start_seq() ->
-    meck:reset([
-        couch_db_engine,
-        couch_log
-    ]).
-
-teardown_start_seq(_) ->
-    ok.
-
-t_calculate_start_seq_uuid_mismatch() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node2, {15, <<"baz">>}),
-        ?assertEqual(0, Seq)
-    end).
-
-t_calculate_start_seq_is_owner() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node2, {15, <<"foo">>}),
-        ?assertEqual(15, Seq)
-    end).
-
-t_calculate_start_seq_not_owner() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node1, {15, <<"foo">>}),
-        ?assertEqual(0, Seq)
-    end).
-
-t_calculate_start_seq_raw() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        Seq = calculate_start_seq(Db, node1, 13),
-        ?assertEqual(13, Seq)
-    end).
-
-t_calculate_start_seq_epoch_mismatch() ->
-    ?_test(begin
-        Db = test_util:fake_db([]),
-        SeqIn = {replace, not_this_node, get_uuid(Db), 42},
-        Seq = calculate_start_seq(Db, node1, SeqIn),
-        ?assertEqual(0, Seq)
-    end).
-
-is_owner_test() ->
-    ?assertNot(is_owner(foo, 1, [])),
-    ?assertNot(is_owner(foo, 1, [{foo, 1}])),
-    ?assert(is_owner(foo, 2, [{foo, 1}])),
-    ?assert(is_owner(foo, 50, [{bar, 100}, {foo, 1}])),
-    ?assert(is_owner(foo, 50, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assert(is_owner(bar, 150, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assertError(duplicate_epoch, validate_epochs([{foo, 1}, {bar, 1}])),
-    ?assertError(epoch_order, validate_epochs([{foo, 100}, {bar, 200}])).
-
-to_binary(DbName) when is_list(DbName) ->
-    ?l2b(DbName);
-to_binary(DbName) when is_binary(DbName) ->
-    DbName.
-
-test_name({Expected, DbName}) ->
-    lists:flatten(io_lib:format("~p -> ~p", [DbName, Expected]));
-test_name(DbName) ->
-    lists:flatten(io_lib:format("~p", [DbName])).
-
-generate_cases_with_shards(DbName) ->
-    DbNameWithShard = add_shard(DbName),
-    DbNameWithShardAndExtension = add_shard(DbName) ++ ".couch",
-    Cases = [
-        DbName, ?l2b(DbName),
-        DbNameWithShard, ?l2b(DbNameWithShard),
-        DbNameWithShardAndExtension, ?l2b(DbNameWithShardAndExtension)
-    ],
-    [{DbName, Case} || Case <- Cases].
-
-add_shard(DbName) ->
-    "shards/00000000-3fffffff/" ++ DbName ++ ".1415960794".
-
-generate_cases(DbName) ->
-    [{DbName, DbName}, {DbName, ?l2b(DbName)}].
-
--endif.
diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl
deleted file mode 100644
index 9adc992..0000000
--- a/src/couch/src/couch_db_engine.erl
+++ /dev/null
@@ -1,1105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_engine).
-
-
--include("couch_db.hrl").
--include("couch_db_int.hrl").
-
-
--type filepath() :: iolist().
--type docid() :: binary().
--type rev() :: {non_neg_integer(), binary()}.
--type revs() :: [rev()].
--type json() :: any().
--type uuid() :: binary().
--type purge_seq() :: non_neg_integer().
-
--type doc_pair() :: {
-        #full_doc_info{} | not_found,
-        #full_doc_info{} | not_found
-    }.
-
--type doc_pairs() :: [doc_pair()].
-
--type db_open_options() :: [
-        create
-    ].
-
--type delete_options() :: [
-        {context, delete | compaction} |
-        sync
-    ].
-
--type purge_info() :: {purge_seq(), uuid(), docid(), revs()}.
--type epochs() :: [{Node::atom(), UpdateSeq::non_neg_integer()}].
--type size_info() :: [{Name::atom(), Size::non_neg_integer()}].
--type partition_info() :: [
-    {partition, Partition::binary()} |
-    {doc_count, DocCount::non_neg_integer()} |
-    {doc_del_count, DocDelCount::non_neg_integer()} |
-    {sizes, size_info()}
-].
-
--type write_stream_options() :: [
-        {buffer_size, Size::pos_integer()} |
-        {encoding, atom()} |
-        {compression_level, non_neg_integer()}
-    ].
-
--type doc_fold_options() :: [
-        {start_key, Key::any()} |
-        {end_key, Key::any()} |
-        {end_key_gt, Key::any()} |
-        {dir, fwd | rev} |
-        include_reductions |
-        include_deleted
-    ].
-
--type changes_fold_options() :: [
-        {dir, fwd | rev}
-    ].
-
--type purge_fold_options() :: [
-        {start_key, Key::any()} |
-        {end_key, Key::any()} |
-        {end_key_gt, Key::any()} |
-        {dir, fwd | rev}
-    ].
-
--type db_handle() :: any().
-
--type doc_fold_fun() :: fun((#full_doc_info{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type local_doc_fold_fun() :: fun((#doc{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type changes_fold_fun() :: fun((#doc_info{}, UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
--type purge_fold_fun() :: fun((purge_info(), UserAcc::any()) ->
-        {ok, NewUserAcc::any()} |
-        {stop, NewUserAcc::any()}).
-
-
-% This is called by couch_server to determine which
-% engine should be used for the given database. DbPath
-% is calculated based on the DbName and the configured
-% extension for a given engine. The first engine to
-% return true is the engine that will be used for the
-% database.
--callback exists(DbPath::filepath()) -> boolean().
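-
-% For illustration only (not part of the original module): a
-% hypothetical engine that stores each database in a single file
-% might implement this as
-%
-%     exists(DbPath) ->
-%         filelib:is_file(DbPath).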
-
-
-% This is called by couch_server to delete a database. It
-% is called from inside the couch_server process, which
-% means that the storage engine does not have to provide
-% its own consistency guarantees when executing in this
-% context. However, since this executes in the context
-% of couch_server, it should return relatively quickly.
--callback delete(
-            RootDir::filepath(),
-            DbPath::filepath(),
-            DelOpts::delete_options()) ->
-        ok | {error, Reason::atom()}.
-
-
-% This function can be called from multiple contexts. It
-% will either be called just before a call to delete/3 above
-% or when a compaction is cancelled which executes in the
-% context of a couch_db_updater process. It is intended to
-% remove any temporary files used during compaction that
-% may be used to recover from a failed compaction swap.
--callback delete_compaction_files(
-            RootDir::filepath(),
-            DbPath::filepath(),
-            DelOpts::delete_options()) ->
-        ok.
-
-
-% This is called from the couch_db_updater:init/1 context,
-% which means that it is guaranteed that only one process is
-% executing for a given DbPath argument (ie, opening a given
-% database is guaranteed to only happen in a single process).
-% However, multiple processes may be trying to open different
-% databases concurrently, so if a database requires a shared
-% resource, concurrency control for that resource must be
-% handled at the storage engine layer.
-%
-% The returned DbHandle should be a term that can be freely
-% copied between processes and accessed concurrently. However
-% it's guaranteed that the handle will only ever be mutated
-% in a single threaded context (ie, within the couch_db_updater
-% process).
--callback init(DbPath::filepath(), db_open_options()) ->
-    {ok, DbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:terminate/2
-% and as such has the same properties as init/2. It's guaranteed
-% to be consistent for a given database but may be called by many
-% databases concurrently.
--callback terminate(Reason::any(), DbHandle::db_handle()) -> Ignored::any().
-
-
-% This is called in the context of couch_db_updater:handle_call/3
-% for any message that is unknown. It can be used to handle messages
-% from asynchronous processes like the engine's compactor if it has one.
--callback handle_db_updater_call(Msg::any(), DbHandle::db_handle()) ->
-        {reply, Resp::any(), NewDbHandle::db_handle()} |
-        {stop, Reason::any(), Resp::any(), NewDbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and has the same properties as handle_call/3.
--callback handle_db_updater_info(Msg::any(), DbHandle::db_handle()) ->
-    {noreply, NewDbHandle::db_handle()} |
-    {noreply, NewDbHandle::db_handle(), Timeout::timeout()} |
-    {stop, Reason::any(), NewDbHandle::db_handle()}.
-
-
-% These functions are called by any process opening or closing
-% a database. As such they need to be able to handle being
-% called concurrently. For example, the legacy engine uses these
-% to add monitors to the main engine process.
--callback incref(DbHandle::db_handle()) -> {ok, NewDbHandle::db_handle()}.
--callback decref(DbHandle::db_handle()) -> ok.
--callback monitored_by(DbHandle::db_handle()) -> [pid()].
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and should return the timestamp of the last activity of
-% the database. If a storage engine has no notion of activity, or
-% the value would be hard to report, it's OK to just return the
-% result of os:timestamp/0, as this will merely prevent idle
-% databases from being closed automatically.
--callback last_activity(DbHandle::db_handle()) -> erlang:timestamp().
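-
-% For illustration, an engine with no cheap notion of activity could
-% simply report the current time, as suggested above (a sketch, not
-% part of the original module):
-%
-%     last_activity(_St) ->
-%         os:timestamp().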
-
-
-% All of the get_* functions may be called from many
-% processes concurrently.
-
-% The database should make a note of the update sequence when it
-% was last compacted. If the database doesn't need compacting it
-% can just hard code a return value of 0.
--callback get_compacted_seq(DbHandle::db_handle()) ->
-            CompactedSeq::non_neg_integer().
-
-
-% The number of documents in the database which have all leaf
-% revisions marked as deleted.
--callback get_del_doc_count(DbHandle::db_handle()) ->
-            DelDocCount::non_neg_integer().
-
-
-% This number is reported in the database info properties and
-% as such can be any JSON value.
--callback get_disk_version(DbHandle::db_handle()) -> Version::json().
-
-
-% The number of documents in the database that have one or more
-% leaf revisions not marked as deleted.
--callback get_doc_count(DbHandle::db_handle()) -> DocCount::non_neg_integer().
-
-
-% The epochs track which node owned the database starting at
-% a given update sequence. Each time a database is opened it
-% should look at the epochs. If the most recent entry is not
-% for the current node it should add an entry that will be
-% written the next time a write is performed. An entry is
-% simply a {node(), CurrentUpdateSeq} tuple.
--callback get_epochs(DbHandle::db_handle()) -> Epochs::epochs().
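-
-% A minimal sketch of the open-time check described above, assuming
-% the engine tracks its Epochs and current update sequence (the
-% helper name is hypothetical):
-%
-%     maybe_add_epoch(Epochs, UpdateSeq) ->
-%         case Epochs of
-%             [{Node, _} | _] when Node =:= node() ->
-%                 Epochs;
-%             _ ->
-%                 [{node(), UpdateSeq} | Epochs]
-%         end.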
-
-
-% Get the current purge sequence known to the engine. This
-% value should be updated during calls to purge_docs.
--callback get_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the oldest purge sequence known to the engine
--callback get_oldest_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the purge infos limit. This should just return the last
-% value that was passed to set_purge_infos_limit/2.
--callback get_purge_infos_limit(DbHandle::db_handle()) -> pos_integer().
-
-
-% Get the revision limit. This should just return the last
-% value that was passed to set_revs_limit/2.
--callback get_revs_limit(DbHandle::db_handle()) -> RevsLimit::pos_integer().
-
-
-% Get the current security properties. This should just return
-% the last value that was passed to set_security/2.
--callback get_security(DbHandle::db_handle()) -> SecProps::any().
-
-
-% Get the current properties.
--callback get_props(DbHandle::db_handle()) -> Props::[any()].
-
-
-% This information is displayed in the database info properties. It
-% should just be a list of {Name::atom(), Size::non_neg_integer()}
-% tuples that will then be combined across shards. Currently,
-% various modules expect there to be values for at least:
-%
-%   file     - Number of bytes on disk
-%
-%   active   - Theoretical minimum number of bytes to store this db on disk
-%              which is used to guide decisions on compaction
-%
-%   external - Number of bytes that would be required to represent the
-%              contents outside of the database (for capacity and backup
-%              planning)
--callback get_size_info(DbHandle::db_handle()) -> SizeInfo::size_info().
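-
-% For example, a plausible return value (the numbers here are made
-% up) would be:
-%
-%     [{file, 1048576}, {active, 65536}, {external, 131072}]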
-
-
-% This returns the information for the given partition as a
-% partition_info() property list: the partition name, doc count,
-% deleted doc count and two sizes:
-%
-%   active   - Theoretical minimum number of bytes to store this partition on disk
-%
-%   external - Number of bytes that would be required to represent the
-%              contents of this partition outside of the database
--callback get_partition_info(DbHandle::db_handle(), Partition::binary()) ->
-    partition_info().
-
-
-% The current update sequence of the database. The update
-% sequence should be incremented for every revision added to
-% the database.
--callback get_update_seq(DbHandle::db_handle()) -> UpdateSeq::non_neg_integer().
-
-
-% Whenever a database is created it should generate a
-% persistent UUID for identification in case the shard should
-% ever need to be moved between nodes in a cluster.
--callback get_uuid(DbHandle::db_handle()) -> UUID::binary().
-
-
-% These functions are only called by couch_db_updater and
-% as such are guaranteed to be single threaded calls. The
-% database should simply store these values somewhere so
-% they can be returned by the corresponding get_* calls.
-
--callback set_revs_limit(DbHandle::db_handle(), RevsLimit::pos_integer()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
--callback set_purge_infos_limit(DbHandle::db_handle(), Limit::pos_integer()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
--callback set_security(DbHandle::db_handle(), SecProps::any()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is only called by couch_db_updater and
-% as such is guaranteed to be a single threaded call. The
-% database should simply store the provided property list
-% unaltered.
-
--callback set_props(DbHandle::db_handle(), Props::any()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% Set the current update sequence of the database. The intention is to use this
-% when copying a database such that the destination update sequence should
-% match exactly the source update sequence.
--callback set_update_seq(
-    DbHandle::db_handle(),
-    UpdateSeq::non_neg_integer()) ->
-    {ok, NewDbHandle::db_handle()}.
-
-
-% This function will be called by many processes concurrently.
-% It should return a #full_doc_info{} record or not_found for
-% every provided DocId in the order those DocId's appear in
-% the input.
-%
-% Traditionally this function will only return documents that
-% were present in the database when the DbHandle was retrieved
-% from couch_server. It is currently unknown what would break
-% if a storage engine deviated from that property.
--callback open_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
-        [#full_doc_info{} | not_found].
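-
-% A sketch of the ordering requirement, assuming a hypothetical
-% engine state record that keeps a map of DocId -> #full_doc_info{}:
-%
-%     open_docs(#st{docs = Docs}, DocIds) ->
-%         [maps:get(Id, Docs, not_found) || Id <- DocIds].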
-
-
-% This function will be called by many processes concurrently.
-% It should return a #doc{} record or not_found for every
-% provided DocId in the order they appear in the input.
-%
-% The same caveats around database snapshots from open_docs
-% apply to this function (although this function is called
-% rather less frequently so it may not be as big of an
-% issue).
--callback open_local_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
-        [#doc{} | not_found].
-
-
-% This function will be called from many contexts concurrently.
-% The provided RawDoc is a #doc{} record that has its body
-% value set to the body value returned from write_doc_body/2.
-%
-% This API exists so that storage engines can store document
-% bodies externally from the #full_doc_info{} record (which
-% is the traditional approach and is recommended).
--callback read_doc_body(DbHandle::db_handle(), RawDoc::doc()) ->
-        doc().
-
-
-% This function will be called from many contexts concurrently.
-% If the storage engine has a purge_info() record for any of the
-% provided UUIDs, those purge_info() records should be returned. The
-% resulting list should have the same length as the input list of
-% UUIDs.
--callback load_purge_infos(DbHandle::db_handle(), [uuid()]) ->
-        [purge_info() | not_found].
-
-
-% This function is called concurrently by any client process
-% that is writing a document. It should accept a #doc{}
-% record and return a #doc{} record with a mutated body it
-% wishes to have written to disk by write_doc_body/2.
-%
-% This API exists so that storage engines can compress
-% document bodies in parallel by client processes rather
-% than forcing all compression to occur single threaded
-% in the context of the couch_db_updater process.
--callback serialize_doc(DbHandle::db_handle(), Doc::doc()) ->
-        doc().
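-
-% For illustration only, a hypothetical engine could serialize (and
-% compress) the body in the calling process like this:
-%
-%     serialize_doc(_St, #doc{} = Doc) ->
-%         Doc#doc{body = term_to_binary(Doc#doc.body, [compressed])}.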
-
-
-% This function is called in the context of a couch_db_updater
-% which means its single threaded for the given DbHandle.
-%
-% The returned #doc{} record should have its Body set to a value
-% that will be stored in the #full_doc_info{} record's revision
-% tree leaves which is passed to read_doc_body/2 above when
-% a client wishes to read a document.
-%
-% The BytesWritten return value is used to determine the number
-% of active bytes in the database, which is used to decide
-% when to compact this database.
--callback write_doc_body(DbHandle::db_handle(), Doc::doc()) ->
-        {ok, FlushedDoc::doc(), BytesWritten::non_neg_integer()}.
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% This is probably the most complicated function in the entire
-% API due to a few subtle behavioral requirements imposed by
-% CouchDB's storage model.
-%
-% The Pairs argument is a list of pairs (2-tuples) of
-% #full_doc_info{} records. The first element of the pair is
-% the #full_doc_info{} that exists on disk. The second element
-% is the new version that should be written to disk. There are
-% two basic cases that should be followed:
-%
-%     1. {not_found, #full_doc_info{}} - A new document was created
-%     2. {#full_doc_info{}, #full_doc_info{}} - A document was updated
-%
-% The cases are fairly straightforward as long as entries moving
-% in the update sequence are properly accounted for.
-%
-% The LocalDocs variable is applied separately. It's important to
-% note for new storage engine authors that these documents are
-% separate because they should *not* be included as part of the
-% changes index for the database.
-%
-% Traditionally an invocation of write_doc_infos should be all
-% or nothing, in that if an error occurs (or the VM dies)
-% then the database doesn't retain any of the changes. However
-% as long as a storage engine maintains consistency this should
-% not be an issue as it has never been a guarantee and the
-% batches are non-deterministic (from the point of view of the
-% client).
--callback write_doc_infos(
-    DbHandle::db_handle(),
-    Pairs::doc_pairs(),
-    LocalDocs::[#doc{}]) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% Each doc_pair() is a 2-tuple of #full_doc_info{} records. The
-% first element of the pair is the #full_doc_info{} that exists
-% on disk. The second element is the new version that should be
-% written to disk. There are three basic cases that should be considered:
-%
-%     1. {#full_doc_info{}, #full_doc_info{}} - A document was partially purged
-%     2. {#full_doc_info{}, not_found} - A document was completely purged
-%     3. {not_found, not_found} - A no-op purge
-%
-% In case 1, non-tail-append engines may have to remove revisions
-% specifically rather than rely on compaction to remove them. Also
-% note that the new #full_doc_info{} will have a different update_seq
-% that will need to be reflected in the changes feed.
-%
-% In case 2 the document is "purged completely", which
-% means it needs to be removed from the database, including its
-% entry in the update sequence.
-%
-% In case 3 we just need to store the purge_info() to know that it
-% was processed even though it produced no changes to the database.
-%
-% The purge_info() tuples contain the purge_seq, uuid, docid and
-% revisions that were requested to be purged. This should be persisted
-% in such a way that we can efficiently load purge_info() by its UUID
-% as well as iterate over purge_info() entries in order of their PurgeSeq.
--callback purge_docs(DbHandle::db_handle(), [doc_pair()], [purge_info()]) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function should be called from a single threaded context and
-% should be used to copy purge infos from one database to another
-% when copying a database.
--callback copy_purge_infos(DbHandle::db_handle(), [purge_info()]) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called in the context of couch_db_updater and
-% as such is single threaded for any given DbHandle.
-%
-% This call is made periodically to ensure that the database has
-% stored all updates on stable storage. (ie, here is where you fsync).
--callback commit_data(DbHandle::db_handle()) ->
-        {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called by multiple processes concurrently.
-%
-% This function, along with open_read_stream, is part of the
-% attachments API. For the time being I'm leaving these mostly
-% undocumented. There are implementations of this in both the
-% legacy btree engine as well as the alternative engine
-% implementations for the curious; however, this is a part of the
-% API for which I'd like feedback.
-%
-% Currently an engine can elect not to implement these APIs
-% by throwing the atom not_supported.
--callback open_write_stream(
-    DbHandle::db_handle(),
-    Options::write_stream_options()) ->
-        {ok, pid()}.
-
-
-% See the documentation for open_write_stream
--callback open_read_stream(DbHandle::db_handle(), StreamDiskInfo::any()) ->
-        {ok, {Module::atom(), ReadStreamState::any()}}.
-
-
-% See the documentation for open_write_stream
--callback is_active_stream(DbHandle::db_handle(), ReadStreamState::any()) ->
-        boolean().
-
-
-% This function is called by many processes concurrently.
-%
-% This function is called to fold over the documents in
-% the database sorted by the raw byte collation order of
-% the document id. For each document id, the supplied user
-% function should be invoked with the first argument set
-% to the #full_doc_info{} record and the second argument
-% set to the current user supplied accumulator. The return
-% value of the user function is a 2-tuple of {Go, NewUserAcc}.
-% The NewUserAcc value should then replace the current
-% user accumulator. If Go is the atom ok, iteration over
-% documents should continue. If Go is the atom stop, then
-% iteration should halt and the return value should be
-% {ok, NewUserAcc}.
-%
-% Possible options to this function include:
-%
-%     1. start_key - Start iteration at the provided key, or
-%        just after it if the key doesn't exist
-%     2. end_key - Stop iteration just after the provided key
-%     3. end_key_gt - Stop iteration prior to visiting the provided
-%        key
-%     4. dir - The atom fwd or rev. This is to be able to iterate
-%        over documents in reverse order. The logic for comparing
-%        start_key, end_key, and end_key_gt is then reversed (ie,
-%        when rev, start_key should be greater than end_key if the
-%        user wishes to see results)
-%     5. include_reductions - This is a hack for _all_docs since
-%        it currently relies on reductions to count an offset. This
-%        is a terrible hack that will need to be addressed by the
-%        API in the future. If this option is present the supplied
-%        user function expects three arguments, where the first
-%        argument is a #full_doc_info{} record, the second argument
-%        is the current list of reductions to the left of the current
-%        document, and the third argument is the current user
-%        accumulator. The return value from the user function is
-%        unaffected. However the final return value of the function
-%        should include the final total reductions as the second
-%        element of a 3-tuple. Like I said, this is a hack.
-%     6. include_deleted - By default deleted documents are not
-%        included in fold_docs calls. However in some special
-%        cases we do want to see them (as of now, just in couch_changes
-%        during the design document changes optimization)
-%
-% Historically, if a process calls this function repeatedly it
-% would see the same results returned even if there were concurrent
-% updates happening. However there doesn't seem to be any instance of
-% that actually happening so a storage engine that includes new results
-% between invocations shouldn't have any issues.
--callback fold_docs(
-    DbHandle::db_handle(),
-    UserFold::doc_fold_fun(),
-    UserAcc::any(),
-    doc_fold_options()) ->
-        {ok, LastUserAcc::any()}.
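-
-% As a usage sketch (not from the original module), counting all
-% non-deleted documents through this API could look like:
-%
-%     CountFun = fun(#full_doc_info{}, Acc) -> {ok, Acc + 1} end,
-%     {ok, Count} = couch_db_engine:fold_docs(Db, CountFun, 0, []).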
-
-
-% This function may be called by many processes concurrently.
-%
-% This should behave exactly the same as fold_docs/4 except that it
-% should only return local documents and the first argument to the
-% user function is a #doc{} record, not a #full_doc_info{}.
--callback fold_local_docs(
-    DbHandle::db_handle(),
-    UserFold::local_doc_fold_fun(),
-    UserAcc::any(),
-    doc_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over the documents (not local
-% documents) in order of their most recent update. Each document
-% in the database should have exactly one entry in this sequence.
-% If a document is updated during a call to this function it should
-% not be included twice as that will probably lead to Very Bad Things.
-%
-% This should behave similarly to fold_docs/4 in that the supplied
-% user function should be invoked with a #full_doc_info{} record
-% as the first argument and the current user accumulator as the
-% second argument. The same semantics for the return value from the
-% user function should be handled as in fold_docs/4.
-%
-% The StartSeq parameter indicates where the fold should start
-% *after*. As in, if a change with a value of StartSeq exists in the
-% database it should not be included in the fold.
-%
-% The only option currently supported by the API is the `dir`
-% option that should behave the same as for fold_docs.
--callback fold_changes(
-    DbHandle::db_handle(),
-    StartSeq::non_neg_integer(),
-    UserFold::changes_fold_fun(),
-    UserAcc::any(),
-    changes_fold_options()) ->
-        {ok, LastUserAcc::any()}.
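-
-% A usage sketch, collecting the update sequences of all changes
-% after sequence 0 (illustrative only):
-%
-%     SeqFun = fun(#doc_info{high_seq = Seq}, Acc) ->
-%         {ok, [Seq | Acc]}
-%     end,
-%     {ok, Seqs} = couch_db_engine:fold_changes(Db, 0, SeqFun, [], []).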
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over purge requests in order of
-% their oldest purge (increasing purge_seq order).
-%
-% The StartPurgeSeq parameter indicates where the fold should start *after*.
--callback fold_purge_infos(
-    DbHandle::db_handle(),
-    StartPurgeSeq::purge_seq(),
-    UserFold::purge_fold_fun(),
-    UserAcc::any(),
-    purge_fold_options()) ->
-        {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to count the number of documents changed
-% since the given UpdateSeq (ie, not including the possible change
-% at exactly UpdateSeq). It is currently only used internally to
-% provide a status update in a replication's _active_tasks entry
-% to indicate how many documents are left to be processed.
-%
-% This is a fairly difficult thing to support in engines that don't
-% behave exactly like a tree with efficient support for counting rows
-% between keys. As such, returning 0, or even just the difference
-% between the current update sequence and the given UpdateSeq, is
-% possibly the best some storage engines can provide. This may lead
-% to some confusion when interpreting the _active_tasks entry if the
-% storage engine isn't accounted for by the client.
--callback count_changes_since(
-    DbHandle::db_handle(),
-    UpdateSeq::non_neg_integer()) ->
-        TotalChanges::non_neg_integer().
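-
-% A sketch of the approximation mentioned above, for a hypothetical
-% engine that can't count efficiently (this is only an upper bound
-% on the number of changed documents):
-%
-%     count_changes_since(St, UpdateSeq) ->
-%         erlang:max(0, get_update_seq(St) - UpdateSeq).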
-
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% If a storage engine requires compaction this is a trigger to start
-% it off. However a storage engine can do whatever it wants here. As
-% this is fairly engine specific there's not a lot of guidance that is
-% generally applicable.
-%
-% When compaction is finished the compactor should use
-% gen_server:cast/2 to send a {compact_done, CompactEngine, CompactInfo}
-% message to the Parent pid provided. Currently CompactEngine
-% must be the same engine that started the compaction and CompactInfo
-% is an arbitrary term that's passed to finish_compaction/4.
--callback start_compaction(
-    DbHandle::db_handle(),
-    DbName::binary(),
-    Options::db_open_options(),
-    Parent::pid()) ->
-        {ok, NewDbHandle::db_handle(), CompactorPid::pid()}.
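-
-% When done, a compactor process would notify the parent as described
-% above, e.g. (a sketch of the message send only):
-%
-%     gen_server:cast(Parent, {compact_done, ?MODULE, CompactInfo}).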
-
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% Same as for start_compaction, this will be extremely specific to
-% any given storage engine.
-%
-% The split in the API here is so that if the storage engine needs
-% to update the DbHandle state of the couch_db_updater it can as
-% finish_compaction/4 is called in the context of the couch_db_updater.
--callback finish_compaction(
-    OldDbHandle::db_handle(),
-    DbName::binary(),
-    Options::db_open_options(),
-    CompactInfo::any()) ->
-        {ok, CompactedDbHandle::db_handle(), CompactorPid::pid() | undefined}.
-
-
--export([
-    exists/2,
-    delete/4,
-    delete_compaction_files/4,
-
-    init/3,
-    terminate/2,
-    handle_db_updater_call/3,
-    handle_db_updater_info/2,
-
-    incref/1,
-    decref/1,
-    monitored_by/1,
-
-    last_activity/1,
-
-    get_engine/1,
-    get_compacted_seq/1,
-    get_del_doc_count/1,
-    get_disk_version/1,
-    get_doc_count/1,
-    get_epochs/1,
-    get_purge_seq/1,
-    get_oldest_purge_seq/1,
-    get_purge_infos_limit/1,
-    get_revs_limit/1,
-    get_security/1,
-    get_props/1,
-    get_size_info/1,
-    get_partition_info/2,
-    get_update_seq/1,
-    get_uuid/1,
-
-    set_revs_limit/2,
-    set_security/2,
-    set_purge_infos_limit/2,
-    set_props/2,
-
-    set_update_seq/2,
-
-    open_docs/2,
-    open_local_docs/2,
-    read_doc_body/2,
-    load_purge_infos/2,
-
-    serialize_doc/2,
-    write_doc_body/2,
-    write_doc_infos/3,
-    purge_docs/3,
-    copy_purge_infos/2,
-    commit_data/1,
-
-    open_write_stream/2,
-    open_read_stream/2,
-    is_active_stream/2,
-
-    fold_docs/4,
-    fold_local_docs/4,
-    fold_changes/5,
-    fold_purge_infos/5,
-    count_changes_since/2,
-
-    start_compaction/1,
-    finish_compaction/2,
-    trigger_on_compact/1
-]).
-
-
-exists(Engine, DbPath) ->
-    Engine:exists(DbPath).
-
-
-delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) ->
-    Engine:delete(RootDir, DbPath, DelOpts).
-
-
-delete_compaction_files(Engine, RootDir, DbPath, DelOpts)
-        when is_list(DelOpts) ->
-    Engine:delete_compaction_files(RootDir, DbPath, DelOpts).
-
-
-init(Engine, DbPath, Options) ->
-    case Engine:init(DbPath, Options) of
-         {ok, EngineState} ->
-             {ok, {Engine, EngineState}};
-         Error ->
-             throw(Error)
-    end.
-
-
-terminate(Reason, #db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:terminate(Reason, EngineState).
-
-
-handle_db_updater_call(Msg, _From, #db{} = Db) ->
-    #db{
-        engine = {Engine, EngineState}
-    } = Db,
-    case Engine:handle_db_updater_call(Msg, EngineState) of
-        {reply, Resp, NewState} ->
-            {reply, Resp, Db#db{engine = {Engine, NewState}}};
-        {stop, Reason, Resp, NewState} ->
-            {stop, Reason, Resp, Db#db{engine = {Engine, NewState}}}
-    end.
-
-
-handle_db_updater_info(Msg, #db{} = Db) ->
-    #db{
-        name = Name,
-        engine = {Engine, EngineState}
-    } = Db,
-    case Engine:handle_db_updater_info(Msg, EngineState) of
-        {noreply, NewState} ->
-            {noreply, Db#db{engine = {Engine, NewState}}};
-        {noreply, NewState, Timeout} ->
-            {noreply, Db#db{engine = {Engine, NewState}}, Timeout};
-        {stop, Reason, NewState} ->
-            couch_log:error("DB ~s shutting down: ~p", [Name, Msg]),
-            {stop, Reason, Db#db{engine = {Engine, NewState}}}
-    end.
-
-
-incref(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewState} = Engine:incref(EngineState),
-    {ok, Db#db{engine = {Engine, NewState}}}.
-
-
-decref(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:decref(EngineState).
-
-
-monitored_by(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:monitored_by(EngineState).
-
-
-last_activity(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:last_activity(EngineState).
-
-
-get_engine(#db{} = Db) ->
-    #db{engine = {Engine, _}} = Db,
-    Engine.
-
-
-get_compacted_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_compacted_seq(EngineState).
-
-
-get_del_doc_count(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_del_doc_count(EngineState).
-
-
-get_disk_version(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_disk_version(EngineState).
-
-
-get_doc_count(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_doc_count(EngineState).
-
-
-get_epochs(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_epochs(EngineState).
-
-
-get_purge_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_purge_seq(EngineState).
-
-
-get_oldest_purge_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_oldest_purge_seq(EngineState).
-
-
-get_purge_infos_limit(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_purge_infos_limit(EngineState).
-
-
-get_revs_limit(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_revs_limit(EngineState).
-
-
-get_security(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_security(EngineState).
-
-
-get_props(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_props(EngineState).
-
-
-get_size_info(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_size_info(EngineState).
-
-
-get_partition_info(#db{} = Db, Partition) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_partition_info(EngineState, Partition).
-
-
-get_update_seq(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_update_seq(EngineState).
-
-get_uuid(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:get_uuid(EngineState).
-
-
-set_revs_limit(#db{} = Db, RevsLimit) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_purge_infos_limit(#db{} = Db, PurgedDocsLimit) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_purge_infos_limit(EngineState, PurgedDocsLimit),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_security(#db{} = Db, SecProps) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_security(EngineState, SecProps),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_props(#db{} = Db, Props) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_props(EngineState, Props),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_update_seq(#db{} = Db, UpdateSeq) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:set_update_seq(EngineState, UpdateSeq),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_docs(#db{} = Db, DocIds) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_docs(EngineState, DocIds).
-
-
-open_local_docs(#db{} = Db, DocIds) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_local_docs(EngineState, DocIds).
-
-
-read_doc_body(#db{} = Db, RawDoc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:read_doc_body(EngineState, RawDoc).
-
-
-load_purge_infos(#db{} = Db, UUIDs) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:load_purge_infos(EngineState, UUIDs).
-
-
-serialize_doc(#db{} = Db, #doc{} = Doc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:serialize_doc(EngineState, Doc).
-
-
-write_doc_body(#db{} = Db, #doc{} = Doc) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:write_doc_body(EngineState, Doc).
-
-
-write_doc_infos(#db{} = Db, DocUpdates, LocalDocs) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:write_doc_infos(EngineState, DocUpdates, LocalDocs),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-purge_docs(#db{} = Db, DocUpdates, Purges) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:purge_docs(
-        EngineState, DocUpdates, Purges),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-copy_purge_infos(#db{} = Db, Purges) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:copy_purge_infos(
-        EngineState, Purges),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-commit_data(#db{} = Db) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    {ok, NewSt} = Engine:commit_data(EngineState),
-    {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_write_stream(#db{} = Db, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_write_stream(EngineState, Options).
-
-
-open_read_stream(#db{} = Db, StreamDiskInfo) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:open_read_stream(EngineState, StreamDiskInfo).
-
-
-is_active_stream(#db{} = Db, ReadStreamState) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:is_active_stream(EngineState, ReadStreamState).
-
-
-fold_docs(#db{} = Db, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options).
-
-
-fold_purge_infos(#db{} = Db, StartPurgeSeq, UserFun, UserAcc, Options) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:fold_purge_infos(
-            EngineState, StartPurgeSeq, UserFun, UserAcc, Options).
-
-
-count_changes_since(#db{} = Db, StartSeq) ->
-    #db{engine = {Engine, EngineState}} = Db,
-    Engine:count_changes_since(EngineState, StartSeq).
-
-
-start_compaction(#db{} = Db) ->
-    #db{
-        engine = {Engine, EngineState},
-        name = DbName,
-        options = Options
-    } = Db,
-    {ok, NewEngineState, Pid} = Engine:start_compaction(
-            EngineState, DbName, Options, self()),
-    {ok, Db#db{
-        engine = {Engine, NewEngineState},
-        compactor_pid = Pid
-    }}.
-
-
-finish_compaction(Db, CompactInfo) ->
-    #db{
-        engine = {Engine, St},
-        name = DbName,
-        options = Options
-    } = Db,
-    NewDb = case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
-        {ok, NewState, undefined} ->
-            couch_event:notify(DbName, compacted),
-            Db#db{
-                engine = {Engine, NewState},
-                compactor_pid = nil
-            };
-        {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
-            Db#db{
-                engine = {Engine, NewState},
-                compactor_pid = CompactorPid
-            }
-    end,
-    ok = gen_server:call(couch_server, {db_updated, NewDb}, infinity),
-    {ok, NewDb}.
-
-
-trigger_on_compact(DbName) ->
-    {ok, DDocs} = get_ddocs(DbName),
-    couch_db_plugin:on_compact(DbName, DDocs).
-
-
-get_ddocs(<<"shards/", _/binary>> = DbName) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(fabric:design_docs(mem3:dbname(DbName)))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, JsonDDocs}} ->
-            {ok, lists:map(fun(JsonDDoc) ->
-                couch_doc:from_json_obj(JsonDDoc)
-            end, JsonDDocs)};
-        {'DOWN', Ref, _, _, Else} ->
-            Else
-    end;
-get_ddocs(DbName) ->
-    couch_util:with_db(DbName, fun(Db) ->
-        FoldFun = fun(FDI, Acc) ->
-            {ok, Doc} = couch_db:open_doc_int(Db, FDI, []),
-            {ok, [Doc | Acc]}
-        end,
-        {ok, Docs} = couch_db:fold_design_docs(Db, FoldFun, [], []),
-        {ok, lists:reverse(Docs)}
-    end).
diff --git a/src/couch/src/couch_db_header.erl b/src/couch/src/couch_db_header.erl
deleted file mode 100644
index 355364f..0000000
--- a/src/couch/src/couch_db_header.erl
+++ /dev/null
@@ -1,405 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_header).
-
-
--export([
-    new/0,
-    from/1,
-    is_header/1,
-    upgrade/1,
-    set/2
-]).
-
--export([
-    disk_version/1,
-    update_seq/1,
-    id_tree_state/1,
-    seq_tree_state/1,
-    latest/1,
-    local_tree_state/1,
-    purge_seq/1,
-    purged_docs/1,
-    security_ptr/1,
-    revs_limit/1,
-    uuid/1,
-    epochs/1,
-    compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 6).
-
--record(db_header, {
-    disk_version = ?LATEST_DISK_VERSION,
-    update_seq = 0,
-    unused = 0,
-    id_tree_state = nil,
-    seq_tree_state = nil,
-    local_tree_state = nil,
-    purge_seq = 0,
-    purged_docs = nil,
-    security_ptr = nil,
-    revs_limit = 1000,
-    uuid,
-    epochs,
-    compacted_seq
-}).
-
-
-new() ->
-    #db_header{
-        uuid = couch_uuids:random(),
-        epochs = [{node(), 0}]
-    }.
-
-
-from(Header0) ->
-    Header = upgrade(Header0),
-    #db_header{
-        uuid = Header#db_header.uuid,
-        epochs = Header#db_header.epochs,
-        compacted_seq = Header#db_header.compacted_seq
-    }.
-
-
-is_header(Header) ->
-    try
-        upgrade(Header),
-        true
-    catch _:_ ->
-        false
-    end.
-
-
-upgrade(Header) ->
-    Funs = [
-        fun upgrade_tuple/1,
-        fun upgrade_disk_version/1,
-        fun upgrade_uuid/1,
-        fun upgrade_epochs/1,
-        fun upgrade_compacted_seq/1
-    ],
-    lists:foldl(fun(F, HdrAcc) ->
-        F(HdrAcc)
-    end, Header, Funs).
-
-
-set(Header0, Fields) ->
-    % A subtlety here is that if a database was open during
-    % the release upgrade that updates to uuids and epochs then
-    % this dynamic upgrade also assigns a uuid and epoch.
-    Header = upgrade(Header0),
-    lists:foldl(fun({Field, Value}, HdrAcc) ->
-        set_field(HdrAcc, Field, Value)
-    end, Header, Fields).
-
-
-disk_version(Header) ->
-    get_field(Header, disk_version).
-
-
-update_seq(Header) ->
-    get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
-    get_field(Header, id_tree_state).
-
-
-seq_tree_state(Header) ->
-    get_field(Header, seq_tree_state).
-
-
-local_tree_state(Header) ->
-    get_field(Header, local_tree_state).
-
-
-purge_seq(Header) ->
-    get_field(Header, purge_seq).
-
-
-purged_docs(Header) ->
-    get_field(Header, purged_docs).
-
-
-security_ptr(Header) ->
-    get_field(Header, security_ptr).
-
-
-revs_limit(Header) ->
-    get_field(Header, revs_limit).
-
-
-uuid(Header) ->
-    get_field(Header, uuid).
-
-
-epochs(Header) ->
-    get_field(Header, epochs).
-
-
-compacted_seq(Header) ->
-    get_field(Header, compacted_seq).
-
-
-get_field(Header, Field) ->
-    Idx = index(Field),
-    case Idx > tuple_size(Header) of
-        true -> undefined;
-        false -> element(index(Field), Header)
-    end.
-
-
-set_field(Header, Field, Value) ->
-    setelement(index(Field), Header, Value).
-
-
-index(Field) ->
-    couch_util:get_value(Field, indexes()).
-
-
-indexes() ->
-    Fields = record_info(fields, db_header),
-    Indexes = lists:seq(2, record_info(size, db_header)),
-    lists:zip(Fields, Indexes).
-
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
-    Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
-    NewSize = record_info(size, db_header),
-    if tuple_size(Old) < NewSize -> ok; true ->
-        erlang:error({invalid_header_size, Old})
-    end,
-    {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
-        {Idx+1, setelement(Idx, Hdr, Val)}
-    end, {1, #db_header{}}, tuple_to_list(Old)),
-    if is_record(New, db_header) -> ok; true ->
-        erlang:error({invalid_header_extension, {Old, New}})
-    end,
-    New.
-
--define(OLD_DISK_VERSION_ERROR,
-    "Database files from versions smaller than 0.10.0 are no longer supported").
-
-upgrade_disk_version(#db_header{}=Header) ->
-    case element(2, Header) of
-        1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-        4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
-        5 -> Header; % pre 1.2
-        ?LATEST_DISK_VERSION -> Header;
-        _ ->
-            Reason = "Incorrect disk header version",
-            throw({database_disk_version_error, Reason})
-    end.
-
-
-upgrade_uuid(#db_header{}=Header) ->
-    case Header#db_header.uuid of
-        undefined ->
-            % Upgrading this old db file to a newer
-            % on disk format that includes a UUID.
-            Header#db_header{uuid=couch_uuids:random()};
-        _ ->
-            Header
-    end.
-
-
-upgrade_epochs(#db_header{}=Header) ->
-    NewEpochs = case Header#db_header.epochs of
-        undefined ->
-            % This node is taking over ownership of a shard with
-            % an old version of a couch file. Before epochs there
-            % was always an implicit assumption that a file was
-            % owned since eternity by the node it was on. This
-            % just codifies that assumption.
-            [{node(), 0}];
-        [{Node, _} | _] = Epochs0 when Node == node() ->
-            % Current node is the current owner of this db
-            Epochs0;
-        Epochs1 ->
-            % This node is taking over ownership of this db
-            % and marking the update sequence where it happened.
-            [{node(), Header#db_header.update_seq} | Epochs1]
-    end,
-    % It's possible for a node to open a db and claim
-    % ownership but never make a write to the db. This
-    % removes nodes that claimed ownership but never
-    % changed the database.
-    DedupedEpochs = remove_dup_epochs(NewEpochs),
-    Header#db_header{epochs=DedupedEpochs}.
-
-
-% This relies slightly on the update_seqs in epochs being sorted,
-% due to how we only ever push things onto the front. However, if
-% we ever had a case where the update_seq is not monotonically
-% increasing, I don't know that we'd want to remove dupes (by
-% calling a sort on the input to this function). So for now we
-% don't sort, but rely on the idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
-    % Seqs match, keep the most recent owner
-    [{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
-    % Seqs don't match.
-    Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
-    % Seqs match, keep the most recent owner
-    remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
-    % Seqs don't match, recurse to check others
-    [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
-
-upgrade_compacted_seq(#db_header{}=Header) ->
-    case Header#db_header.compacted_seq of
-        undefined ->
-            Header#db_header{compacted_seq=0};
-        _ ->
-            Header
-    end.
-
-latest(?LATEST_DISK_VERSION) ->
-    true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
-    false;
-latest(_Else) ->
-    undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
-    {
-        db_header, % record name
-        Vsn, % disk version
-        100, % update_seq
-        0, % unused
-        foo, % id_tree_state
-        bar, % seq_tree_state
-        bam, % local_tree_state
-        1, % purge_seq
-        baz, % purged_docs
-        bang, % security_ptr
-        999 % revs_limit
-    }.
-
-
-upgrade_v3_test() ->
-    Vsn3Header = mk_header(3),
-    NewHeader = upgrade_tuple(Vsn3Header),
-
-    % Tuple upgrades don't change
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(3, disk_version(NewHeader)),
-    ?assertEqual(100, update_seq(NewHeader)),
-    ?assertEqual(foo, id_tree_state(NewHeader)),
-    ?assertEqual(bar, seq_tree_state(NewHeader)),
-    ?assertEqual(bam, local_tree_state(NewHeader)),
-    ?assertEqual(1, purge_seq(NewHeader)),
-    ?assertEqual(baz, purged_docs(NewHeader)),
-    ?assertEqual(bang, security_ptr(NewHeader)),
-    ?assertEqual(999, revs_limit(NewHeader)),
-    ?assertEqual(undefined, uuid(NewHeader)),
-    ?assertEqual(undefined, epochs(NewHeader)),
-
-    ?assertThrow({database_disk_version_error, _},
-                 upgrade_disk_version(NewHeader)).
-
-
-upgrade_v5_test() ->
-    Vsn5Header = mk_header(5),
-    NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
-    ?assert(is_record(NewHeader, db_header)),
-    ?assertEqual(5, disk_version(NewHeader)),
-
-    % Security ptr isn't changed for v5 headers
-    ?assertEqual(bang, security_ptr(NewHeader)).
-
-
-upgrade_uuid_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a new UUID
-    NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
-    ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
-    % Headers with a UUID don't have their UUID changed
-    NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
-    ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
-    % Derived empty headers maintain the same UUID
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-
-upgrade_epochs_test() ->
-    Vsn5Header = mk_header(5),
-
-    % Upgraded headers get a default epochs set
-    NewHeader = upgrade(Vsn5Header),
-    ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
-    % Fake an old entry in epochs
-    FakeFields = [
-        {update_seq, 20},
-        {epochs, [{'someothernode@someotherhost', 0}]}
-    ],
-    NotOwnedHeader = set(NewHeader, FakeFields),
-
-    OwnedEpochs = [
-        {node(), 20},
-        {'someothernode@someotherhost', 0}
-    ],
-
-    % Upgrading a header not owned by the local node updates
-    % the epochs appropriately.
-    NowOwnedHeader = upgrade(NotOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
-    % Headers with epochs stay the same after upgrades
-    NewNewHeader = upgrade(NowOwnedHeader),
-    ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
-    % Getting a reset header maintains the epoch data
-    ResetHeader = from(NewNewHeader),
-    ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-
-get_uuid_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, uuid(Vsn5Header)).
-
-
-get_epochs_from_old_header_test() ->
-    Vsn5Header = mk_header(5),
-    ?assertEqual(undefined, epochs(Vsn5Header)).
-
-
--endif.
diff --git a/src/couch/src/couch_db_int.hrl b/src/couch/src/couch_db_int.hrl
deleted file mode 100644
index 7da0ce5..0000000
--- a/src/couch/src/couch_db_int.hrl
+++ /dev/null
@@ -1,76 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--record(db, {
-    vsn = 1,
-    name,
-    filepath,
-
-    engine = {couch_bt_engine, undefined},
-
-    main_pid = nil,
-    compactor_pid = nil,
-
-    committed_update_seq,
-
-    instance_start_time, % number of microsecs since Jan 1 1970, as a binary string
-
-    user_ctx = #user_ctx{},
-    security = [],
-    validate_doc_funs = undefined,
-
-    before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
-    after_doc_read = nil,    % nil | fun(Doc, Db) -> NewDoc
-
-    % feature removed in 3.x, but field kept to avoid changing db record size
-    % and breaking rolling cluster upgrade
-    waiting_delayed_commit_deprecated,
-
-    options = [],
-    compression
-}).
-
-
--define(OLD_DB_REC, {
-    db,
-    _, % MainPid
-    _, % CompactorPid
-    _, % InstanceStartTime
-    _, % Fd
-    _, % FdMonitor
-    _, % Header
-    _, % CommittedUpdateSeq
-    _, % IdTree
-    _, % SeqTree
-    _, % LocalTree
-    _, % UpdateSeq
-    _, % Name
-    _, % FilePath
-    _, % ValidateDocFuns
-    _, % Security
-    _, % SecurityPtr
-    _, % UserCtx
-    _, % WaitingDelayedCommit
-    _, % RevsLimit
-    _, % FsyncOptions
-    _, % Options
-    _, % Compression
-    _, % BeforeDocUpdate
-    _  % AfterDocRead
-}).
-
-
--define(OLD_DB_NAME(Db), element(2, Db)).
--define(OLD_DB_MAIN_PID(Db), element(13, Db)).
--define(OLD_DB_USER_CTX(Db), element(18, Db)).
--define(OLD_DB_SECURITY(Db), element(16, Db)).
diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl
deleted file mode 100644
index c3684c6..0000000
--- a/src/couch/src/couch_db_plugin.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_plugin).
-
--export([
-    validate_dbname/3,
-    before_doc_update/3,
-    after_doc_read/2,
-    validate_docid/1,
-    check_is_admin/1,
-    is_valid_purge_client/2,
-    on_compact/2,
-    on_delete/2
-]).
-
--define(SERVICE_ID, couch_db).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-validate_dbname(DbName, Normalized, Default) ->
-    maybe_handle(validate_dbname, [DbName, Normalized], Default).
-
-before_doc_update(Db, Doc0, UpdateType) ->
-    Fun = couch_db:get_before_doc_update_fun(Db),
-    case with_pipe(before_doc_update, [Doc0, Db, UpdateType]) of
-        [Doc1, _Db, UpdateType1] when is_function(Fun) ->
-            Fun(Doc1, Db, UpdateType1);
-        [Doc1, _Db, _UpdateType] ->
-            Doc1
-    end.
-
-after_doc_read(Db, Doc0) ->
-    Fun = couch_db:get_after_doc_read_fun(Db),
-    case with_pipe(after_doc_read, [Doc0, Db]) of
-        [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
-        [Doc1, _Db] -> Doc1
-    end.
-
-validate_docid(Id) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if it specifically allow the given Id
-    couch_epi:any(Handle, ?SERVICE_ID, validate_docid, [Id], []).
-
-check_is_admin(Db) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if it specifically allow the given Id
-    couch_epi:any(Handle, ?SERVICE_ID, check_is_admin, [Db], []).
-
-is_valid_purge_client(DbName, Props) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    %% callbacks return true only if it specifically allow the given Id
-    couch_epi:any(Handle, ?SERVICE_ID, is_valid_purge_client, [DbName, Props], []).
-
-on_compact(DbName, DDocs) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    couch_epi:apply(Handle, ?SERVICE_ID, on_compact, [DbName, DDocs], []).
-
-on_delete(DbName, Options) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    couch_epi:apply(Handle, ?SERVICE_ID, on_delete, [DbName, Options], []).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-with_pipe(Func, Args) ->
-    do_apply(Func, Args, [pipe]).
-
-do_apply(Func, Args, Opts) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
-
-maybe_handle(Func, Args, Default) ->
-    Handle = couch_epi:get_handle(?SERVICE_ID),
-    case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
-       no_decision when is_function(Default) ->
-           apply(Default, Args);
-       no_decision ->
-           Default;
-       {decided, Result} ->
-           Result
-    end.
diff --git a/src/couch/src/couch_db_split.erl b/src/couch/src/couch_db_split.erl
deleted file mode 100644
index 3a1f98d..0000000
--- a/src/couch/src/couch_db_split.erl
+++ /dev/null
@@ -1,503 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_split).
-
-
--export([
-    split/3,
-    copy_local_docs/3,
-    cleanup_target/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--define(DEFAULT_BUFFER_SIZE, 16777216).
-
-
--record(state, {
-    source_db,
-    source_uuid,
-    targets,
-    pickfun,
-    max_buffer_size = ?DEFAULT_BUFFER_SIZE,
-    hashfun
-}).
-
--record(target, {
-    db,
-    uuid,
-    buffer = [],
-    buffer_size = 0
-}).
-
--record(racc, {
-    id,
-    source_db,
-    target_db,
-    active = 0,
-    external = 0,
-    atts = []
-}).
-
-
-% Public API
-
-split(Source, #{} = Targets, PickFun) when
-        map_size(Targets) >= 2, is_function(PickFun, 3) ->
-    case couch_db:open_int(Source, [?ADMIN_CTX]) of
-        {ok, SourceDb} ->
-            Engine = get_engine(SourceDb),
-            Partitioned = couch_db:is_partitioned(SourceDb),
-            HashFun = mem3_hash:get_hash_fun(couch_db:name(SourceDb)),
-            try
-                split(SourceDb, Partitioned, Engine, Targets, PickFun, HashFun)
-            catch
-                throw:{target_create_error, DbName, Error, TargetDbs} ->
-                    cleanup_targets(TargetDbs, Engine),
-                    {error, {target_create_error, DbName, Error}}
-            after
-                couch_db:close(SourceDb)
-            end;
-        {not_found, _} ->
-            {error, missing_source}
-    end.
-
-
-copy_local_docs(Source, #{} = Targets0, PickFun) when
-        is_binary(Source), is_function(PickFun, 3) ->
-    case couch_db:open_int(Source, [?ADMIN_CTX]) of
-        {ok, SourceDb} ->
-            try
-                Targets = maps:map(fun(_, DbName) ->
-                    {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
-                    #target{db = Db, uuid = couch_db:get_uuid(Db)}
-                end, Targets0),
-                SourceName = couch_db:name(SourceDb),
-                try
-                    State = #state{
-                        source_db = SourceDb,
-                        source_uuid = couch_db:get_uuid(SourceDb),
-                        targets = Targets,
-                        pickfun = PickFun,
-                        hashfun = mem3_hash:get_hash_fun(SourceName)
-                    },
-                    copy_local_docs(State),
-                    ok
-                after
-                    maps:map(fun(_, #target{db = Db} = T) ->
-                        couch_db:close(Db),
-                        T#target{db = undefined}
-                    end, Targets)
-                end
-            after
-                couch_db:close(SourceDb)
-            end;
-        {not_found, _} ->
-            {error, missing_source}
-    end.
-
-
-cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) ->
-    case couch_db:open_int(Source, [?ADMIN_CTX]) of
-        {ok, SourceDb} ->
-            try
-                delete_target(Target, get_engine(SourceDb))
-            after
-                couch_db:close(SourceDb)
-            end;
-        {not_found, _} ->
-            {error, missing_source}
-    end.
-
-
-% Private Functions
-
-split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) ->
-    Targets = maps:fold(fun(Key, DbName, Map) ->
-        case couch_db:validate_dbname(DbName) of
-            ok ->
-                ok;
-            {error, E} ->
-                throw({target_create_error, DbName, E, Map})
-        end,
-        case couch_server:lock(DbName, <<"shard splitting">>) of
-            ok ->
-                ok;
-            {error, Err} ->
-                throw({target_create_error, DbName, Err, Map})
-        end,
-        {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
-        Opts = [create, ?ADMIN_CTX] ++ case Partitioned of
-            true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}];
-            false -> []
-        end,
-        case couch_db:start_link(Engine, DbName, Filepath, Opts) of
-            {ok, Db} ->
-                Map#{Key => #target{db = Db}};
-            {error, Error} ->
-                throw({target_create_error, DbName, Error, Map})
-        end
-    end, #{}, Targets0),
-    Seq = couch_db:get_update_seq(SourceDb),
-    State1 = #state{
-        source_db = SourceDb,
-        targets = Targets,
-        pickfun = PickFun,
-        hashfun = HashFun,
-        max_buffer_size = get_max_buffer_size()
-    },
-    State2 = copy_docs(State1),
-    State3 = copy_checkpoints(State2),
-    State4 = copy_meta(State3),
-    State5 = copy_purge_info(State4),
-    State6 = set_targets_update_seq(State5),
-    stop_targets(State6#state.targets),
-    {ok, Seq}.
-
-
-cleanup_targets(#{} = Targets, Engine) ->
-    maps:map(fun(_, #target{db = Db} = T) ->
-        ok = stop_target_db(Db),
-        DbName = couch_db:name(Db),
-        delete_target(DbName, Engine),
-        couch_server:unlock(DbName),
-        T
-    end, Targets).
-
-
-stop_targets(#{} = Targets) ->
-    maps:map(fun(_, #target{db = Db} = T) ->
-        {ok, Db1} = couch_db_engine:commit_data(Db),
-        ok = stop_target_db(Db1),
-        T
-    end, Targets).
-
-
-stop_target_db(Db) ->
-    couch_db:close(Db),
-    Pid = couch_db:get_pid(Db),
-    catch unlink(Pid),
-    catch exit(Pid, kill),
-    couch_server:unlock(couch_db:name(Db)),
-    ok.
-
-
-delete_target(DbName, Engine) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
-    DelOpt = [{context, compaction}, sync],
-    couch_db_engine:delete(Engine, RootDir, Filepath, DelOpt).
-
-
-pick_target(DocId, #state{} = State, #{} = Targets) ->
-    #state{pickfun = PickFun, hashfun = HashFun} = State,
-    Key = PickFun(DocId, maps:keys(Targets), HashFun),
-    {Key, maps:get(Key, Targets)}.
-
-
-set_targets_update_seq(#state{targets = Targets} = State) ->
-    Seq = couch_db:get_update_seq(State#state.source_db),
-    Targets1 = maps:map(fun(_, #target{db = Db} = Target) ->
-        {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq),
-        Target#target{db = Db1}
-    end, Targets),
-    State#state{targets = Targets1}.
-
-
-copy_checkpoints(#state{} = State) ->
-    #state{source_db = Db, source_uuid = SrcUUID, targets = Targets} = State,
-    FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
-        UpdatedAcc = case Id of
-            <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
-                % Transform mem3 internal replicator checkpoints to avoid
-                % rewinding the changes feed when it sees the new shards
-                maps:map(fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) ->
-                    Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc),
-                    T#target{buffer = [Doc1 | Docs]}
-                end, Acc);
-            <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
-                % Copy purge checkpoints to all shards
-                maps:map(fun(_, #target{buffer = Docs} = T) ->
-                    T#target{buffer = [Doc | Docs]}
-                end, Acc);
-            <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                % Skip copying these that will be done during
-                % local docs top off right before the shards are switched
-                Acc
-        end,
-        {ok, UpdatedAcc}
-    end,
-    {ok, Targets1} = couch_db_engine:fold_local_docs(Db, FoldFun, Targets, []),
-    Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
-        case Docs of
-            [] ->
-                T;
-            [_ | _] ->
-                Docs1 = lists:reverse(Docs),
-                {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1),
-                {ok, TDb2} = couch_db_engine:commit_data(TDb1),
-                T#target{db = TDb2, buffer = []}
-        end
-    end, Targets1),
-    State#state{targets = Targets2}.
-
-
-update_checkpoint_doc(Old, New, #doc{body = {Props}} = Doc) ->
-    NewProps = case couch_util:get_value(<<"target_uuid">>, Props) of
-        Old ->
-            replace_kv(Props, {<<"target_uuid">>, Old, New});
-        Other when is_binary(Other) ->
-            replace_kv(Props, {<<"source_uuid">>, Old, New})
-    end,
-    NewId = update_checkpoint_id(Doc#doc.id, Old, New),
-    Doc#doc{id = NewId, body = {NewProps}}.
-
-
-update_checkpoint_id(Id, Old, New) ->
-    OldHash = mem3_rep:local_id_hash(Old),
-    NewHash = mem3_rep:local_id_hash(New),
-    binary:replace(Id, OldHash, NewHash).
-
-
-replace_kv({[]}, _) ->
-    {[]};
-replace_kv({KVs}, Replacement) ->
-    {[replace_kv(KV, Replacement) || KV <- KVs]};
-replace_kv([], _) ->
-    [];
-replace_kv(List, Replacement) when is_list(List) ->
-    [replace_kv(V, Replacement) || V <- List];
-replace_kv({K, V}, {K, V, NewV}) ->
-    {K, NewV};
-replace_kv({K, V}, Replacement) ->
-    {K, replace_kv(V, Replacement)};
-replace_kv(V, _) ->
-    V.
-
-
-copy_meta(#state{source_db = SourceDb, targets = Targets} = State) ->
-    RevsLimit = couch_db:get_revs_limit(SourceDb),
-    {SecProps} = couch_db:get_security(SourceDb),
-    PurgeLimit = couch_db:get_purge_infos_limit(SourceDb),
-    Targets1 = maps:map(fun(_, #target{db = Db} = T) ->
-        {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit),
-        {ok, Db2} = couch_db_engine:set_security(Db1, SecProps),
-        {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit),
-        T#target{db = Db3}
-    end, Targets),
-    State#state{targets = Targets1}.
-
-
-copy_purge_info(#state{source_db = Db} = State) ->
-    {ok, NewState} = couch_db:fold_purge_infos(Db, 0, fun purge_cb/2, State),
-    Targets = maps:map(fun(_, #target{} = T) ->
-        commit_purge_infos(T)
-    end, NewState#state.targets),
-    NewState#state{targets = Targets}.
-
-
-acc_and_flush(Item, #target{}= Target, MaxBuffer, FlushCb) ->
-    #target{buffer = Buffer, buffer_size = BSize} = Target,
-    BSize1 = BSize + ?term_size(Item),
-    Target1 = Target#target{buffer = [Item | Buffer], buffer_size = BSize1},
-    case BSize1 > MaxBuffer of
-        true -> FlushCb(Target1);
-        false -> Target1
-    end.
-
-
-purge_cb({_PSeq, _UUID, Id, _Revs} = PI, #state{targets = Targets} = State) ->
-    {Key, Target} = pick_target(Id, State, Targets),
-    MaxBuffer = State#state.max_buffer_size,
-    Target1 = acc_and_flush(PI, Target, MaxBuffer, fun commit_purge_infos/1),
-    {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-
-commit_purge_infos(#target{buffer = [], db = Db} = Target) ->
-    Target#target{db = Db};
-
-commit_purge_infos(#target{buffer = PIs0, db = Db} = Target) ->
-    PIs = lists:reverse(PIs0),
-    {ok, Db1} = couch_db_engine:copy_purge_infos(Db, PIs),
-    {ok, Db2} = couch_db_engine:commit_data(Db1),
-    Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-
-copy_docs(#state{source_db = Db} = State) ->
-    {ok, NewState} = couch_db:fold_changes(Db, 0, fun changes_cb/2, State),
-    CommitTargets = maps:map(fun(_, #target{} = T) ->
-        commit_docs(T)
-    end, NewState#state.targets),
-    NewState#state{targets = CommitTargets}.
-
-
-% Backwards compatibility clause. Seq trees used to hold #doc_infos at one time
-changes_cb(#doc_info{id = Id}, #state{source_db = Db} = State) ->
-    [FDI = #full_doc_info{}] = couch_db_engine:open_docs(Db, [Id]),
-    changes_cb(FDI, State);
-
-changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) ->
-    #state{source_db = SourceDb, targets = Targets} = State,
-    {Key, Target} = pick_target(Id, State, Targets),
-    FDI1 = process_fdi(FDI, SourceDb, Target#target.db),
-    MaxBuffer = State#state.max_buffer_size,
-    Target1 = acc_and_flush(FDI1, Target, MaxBuffer, fun commit_docs/1),
-    {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-
-commit_docs(#target{buffer = [], db = Db} = Target) ->
-    Target#target{db = Db};
-
-commit_docs(#target{buffer = FDIs, db = Db} = Target) ->
-    Pairs = [{not_found, FDI} || FDI <- lists:reverse(FDIs)],
-    {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, []),
-    {ok, Db2} = couch_db_engine:commit_data(Db1),
-    Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-
-process_fdi(FDI, SourceDb, TargetDb) ->
-    #full_doc_info{id = Id, rev_tree = RTree} = FDI,
-    Acc = #racc{id = Id, source_db = SourceDb, target_db = TargetDb},
-    {NewRTree, NewAcc} = couch_key_tree:mapfold(fun revtree_cb/4, Acc, RTree),
-    {Active, External} = total_sizes(NewAcc),
-    FDI#full_doc_info{
-        rev_tree = NewRTree,
-        sizes = #size_info{active = Active, external = External}
-    }.
-
-
-revtree_cb(_Rev, _Leaf, branch, Acc) ->
-    {[], Acc};
-
-revtree_cb({Pos, RevId}, Leaf, leaf, Acc) ->
-    #racc{id = Id, source_db = SourceDb, target_db = TargetDb} = Acc,
-    #leaf{deleted = Deleted, ptr = Ptr, sizes = LeafSizes} = Leaf,
-    Doc0 = #doc{
-        id = Id,
-        revs = {Pos, [RevId]},
-        deleted = Deleted,
-        body = Ptr
-    },
-    Doc1 = couch_db_engine:read_doc_body(SourceDb, Doc0),
-    #doc{body = Body, atts = AttInfos0} = Doc1,
-    External = case LeafSizes#size_info.external of
-        0 when is_binary(Body) ->
-            couch_compress:uncompressed_size(Body);
-        0 ->
-            couch_ejson_size:encoded_size(Body);
-        N -> N
-    end,
-    AttInfos = if not is_binary(AttInfos0) -> AttInfos0; true ->
-        couch_compress:decompress(AttInfos0)
-    end,
-    Atts = [process_attachment(Att, SourceDb, TargetDb) || Att <- AttInfos],
-    Doc2 = Doc1#doc{atts = Atts},
-    Doc3 = couch_db_engine:serialize_doc(TargetDb, Doc2),
-    {ok, Doc4, Active} = couch_db_engine:write_doc_body(TargetDb, Doc3),
-    % element(3,...) and (4,...) are the stream pointer and size respectively
-    % (see couch_att.erl). They are numeric for compatibility with older formats
-    AttSizes = [{element(3, A), element(4, A)} || A <- Atts],
-    NewLeaf = Leaf#leaf{
-        ptr = Doc4#doc.body,
-        sizes = #size_info{active = Active, external = External},
-        atts = AttSizes
-    },
-    {NewLeaf, add_sizes(Active, External, AttSizes, Acc)}.
-
-
-% This is copied almost verbatim from the compactor
-process_attachment({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}, SourceDb,
-        TargetDb) ->
-    % 010 upgrade code
-    {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
-    {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
-    ok = couch_stream:copy(SrcStream, DstStream),
-    {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
-            couch_stream:close(DstStream),
-    {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-    couch_util:check_md5(ExpectedMd5, ActualMd5),
-    {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-
-process_attachment({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5,
-        Enc1}, SourceDb, TargetDb) ->
-    {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
-    {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
-    ok = couch_stream:copy(SrcStream, DstStream),
-    {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
-            couch_stream:close(DstStream),
-    {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
-    couch_util:check_md5(ExpectedMd5, ActualMd5),
-    Enc = case Enc1 of
-        true -> gzip;  % 0110 upgrade code
-        false -> identity;  % 0110 upgrade code
-        _ -> Enc1
-    end,
-    {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}.
-
-
-get_engine(Db) ->
-    {ok, DbInfoProps} = couch_db:get_db_info(Db),
-    proplists:get_value(engine, DbInfoProps).
-
-
-add_sizes(Active, External, Atts, #racc{} = Acc) ->
-    #racc{active = ActiveAcc, external = ExternalAcc, atts = AttsAcc} = Acc,
-    NewActiveAcc = ActiveAcc + Active,
-    NewExternalAcc = ExternalAcc + External,
-    NewAttsAcc = lists:umerge(Atts, AttsAcc),
-    Acc#racc{
-        active = NewActiveAcc,
-        external = NewExternalAcc,
-        atts = NewAttsAcc
-    }.
-
-
-total_sizes(#racc{active = Active, external = External, atts = Atts}) ->
-    TotalAtts = lists:foldl(fun({_, S}, A) -> S + A end, 0, Atts),
-    {Active + TotalAtts, External + TotalAtts}.
-
-
-get_max_buffer_size() ->
-    config:get_integer("reshard", "split_buffer_size", ?DEFAULT_BUFFER_SIZE).
-
-
-copy_local_docs(#state{source_db = Db, targets = Targets} = State) ->
-    FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
-        UpdatedAcc = case Id of
-            <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
-                Acc;
-            <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
-                Acc;
-            <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                % Users' and replicator app's checkpoints go to their
-                % respective shards based on the general hashing algorithm
-                {Key, Target} = pick_target(Id, State, Acc),
-                #target{buffer = Docs} = Target,
-                Acc#{Key => Target#target{buffer = [Doc | Docs]}}
-        end,
-        {ok, UpdatedAcc}
-    end,
-    {ok, Targets1} = couch_db:fold_local_docs(Db, FoldFun, Targets, []),
-    Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
-        case Docs of
-            [] ->
-                T;
-            [_ | _] ->
-                Docs1 = lists:reverse(Docs),
-                {ok, _} = couch_db:update_docs(TDb, Docs1),
-                T#target{buffer = []}
-        end
-    end, Targets1),
-    State#state{targets = Targets2}.
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
deleted file mode 100644
index 1ca804c..0000000
--- a/src/couch/src/couch_db_updater.erl
+++ /dev/null
@@ -1,955 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_updater).
--behaviour(gen_server).
--vsn(1).
-
--export([add_sizes/3, upgrade_sizes/1]).
--export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(IDLE_LIMIT_DEFAULT, 61000).
--define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000). % 10 GiB
-
-
--record(merge_acc, {
-    revs_limit,
-    merge_conflicts,
-    add_infos = [],
-    rem_seqs = [],
-    cur_seq,
-    full_partitions = []
-}).
-
-
-init({Engine, DbName, FilePath, Options0}) ->
-    erlang:put(io_priority, {db_update, DbName}),
-    update_idle_limit_from_config(),
-    DefaultSecObj = default_security_object(DbName),
-    Options = [{default_security_object, DefaultSecObj} | Options0],
-    try
-        {ok, EngineState} = couch_db_engine:init(Engine, FilePath, Options),
-        Db = init_db(DbName, FilePath, EngineState, Options),
-        case lists:member(sys_db, Options) of
-            false ->
-                couch_stats_process_tracker:track([couchdb, open_databases]);
-            true ->
-                ok
-        end,
-        % Don't load validation funs here because the fabric query is
-        % liable to race conditions. Instead see
-        % couch_db:validate_doc_update, which loads them lazily.
-        NewDb = Db#db{main_pid = self()},
-        proc_lib:init_ack({ok, NewDb}),
-        gen_server:enter_loop(?MODULE, [], NewDb, idle_limit())
-    catch
-        throw:InitError ->
-            proc_lib:init_ack(InitError)
-    end.
-
-
-terminate(Reason, Db) ->
-    couch_util:shutdown_sync(Db#db.compactor_pid),
-    couch_db_engine:terminate(Reason, Db),
-    ok.
-
-handle_call(get_db, _From, Db) ->
-    {reply, {ok, Db}, Db, idle_limit()};
-handle_call(start_compact, _From, Db) ->
-    {noreply, NewDb, _Timeout} = handle_cast(start_compact, Db),
-    {reply, {ok, NewDb#db.compactor_pid}, NewDb, idle_limit()};
-handle_call(compactor_pid, _From, #db{compactor_pid = Pid} = Db) ->
-    {reply, Pid, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
-    {reply, ok, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
-    unlink(Pid),
-    exit(Pid, kill),
-    couch_server:delete_compaction_files(Db#db.name),
-    Db2 = Db#db{compactor_pid = nil},
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2, idle_limit()};
-
-handle_call({set_security, NewSec}, _From, #db{} = Db) ->
-    {ok, NewDb} = couch_db_engine:set_security(Db, NewSec),
-    NewSecDb = commit_data(NewDb#db{
-        security = NewSec
-    }),
-    ok = gen_server:call(couch_server, {db_updated, NewSecDb}, infinity),
-    {reply, ok, NewSecDb, idle_limit()};
-
-handle_call({set_revs_limit, Limit}, _From, Db) ->
-    {ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit),
-    Db3 = commit_data(Db2),
-    ok = gen_server:call(couch_server, {db_updated, Db3}, infinity),
-    {reply, ok, Db3, idle_limit()};
-
-handle_call({set_purge_infos_limit, Limit}, _From, Db) ->
-    {ok, Db2} = couch_db_engine:set_purge_infos_limit(Db, Limit),
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2, idle_limit()};
-
-handle_call({purge_docs, [], _}, _From, Db) ->
-    {reply, {ok, []}, Db, idle_limit()};
-
-handle_call({purge_docs, PurgeReqs0, Options}, _From, Db) ->
-    % Filter out any previously applied updates during
-    % internal replication
-    IsRepl = lists:member(replicated_changes, Options),
-    PurgeReqs = if not IsRepl -> PurgeReqs0; true ->
-        UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0],
-        PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs),
-        lists:flatmap(fun
-            ({not_found, PReq}) -> [PReq];
-            ({{_, _, _, _}, _}) -> []
-        end, lists:zip(PurgeInfos, PurgeReqs0))
-    end,
-    {ok, NewDb, Replies} = purge_docs(Db, PurgeReqs),
-    {reply, {ok, Replies}, NewDb, idle_limit()};
-
-handle_call(Msg, From, Db) ->
-    case couch_db_engine:handle_db_updater_call(Msg, From, Db) of
-        {reply, Resp, NewDb} ->
-            {reply, Resp, NewDb, idle_limit()};
-        Else ->
-            Else
-    end.
-
-
-handle_cast({load_validation_funs, ValidationFuns}, Db) ->
-    Db2 = Db#db{validate_doc_funs = ValidationFuns},
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {noreply, Db2, idle_limit()};
-handle_cast(start_compact, Db) ->
-    case Db#db.compactor_pid of
-        nil ->
-            % For now we only support compacting to the same
-            % storage engine. After the first round of patches
-            % we'll add a field that sets the target engine
-            % type to compact to with a new copy compactor.
-            UpdateSeq = couch_db_engine:get_update_seq(Db),
-            Args = [Db#db.name, UpdateSeq],
-            couch_log:info("Starting compaction for db \"~s\" at ~p", Args),
-            {ok, Db2} = couch_db_engine:start_compaction(Db),
-            ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-            {noreply, Db2, idle_limit()};
-        _ ->
-            % compact currently running, this is a no-op
-            {noreply, Db, idle_limit()}
-    end;
-handle_cast({compact_done, _Engine, CompactInfo}, #db{} = OldDb) ->
-    {ok, NewDb} = couch_db_engine:finish_compaction(OldDb, CompactInfo),
-    {noreply, NewDb};
-
-handle_cast(wakeup, Db) ->
-    {noreply, Db, idle_limit()};
-
-handle_cast(Msg, #db{name = Name} = Db) ->
-    couch_log:error("Database `~s` updater received unexpected cast: ~p",
-                    [Name, Msg]),
-    {stop, Msg, Db}.
-
-
-handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts},
-        Db) ->
-    GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
-    if NonRepDocs == [] ->
-        {GroupedDocs3, Clients} = collect_updates(GroupedDocs2,
-                [Client], MergeConflicts);
-    true ->
-        GroupedDocs3 = GroupedDocs2,
-        Clients = [Client]
-    end,
-    NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
-    try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts) of
-    {ok, Db2, UpdatedDDocIds} ->
-        ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-        case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of
-            {Seq, Seq} -> ok;
-            _ -> couch_event:notify(Db2#db.name, updated)
-        end,
-        if NonRepDocs2 /= [] ->
-            couch_event:notify(Db2#db.name, local_updated);
-        true -> ok
-        end,
-        [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
-        Db3 = case length(UpdatedDDocIds) > 0 of
-            true ->
-                % Ken and ddoc_cache are the only things that
-                % use the unspecified ddoc_updated message. We
-                % should update them to use the new message per
-                % ddoc.
-                lists:foreach(fun(DDocId) ->
-                    couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
-                end, UpdatedDDocIds),
-                couch_event:notify(Db2#db.name, ddoc_updated),
-                ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
-                refresh_validate_doc_funs(Db2);
-            false ->
-                Db2
-        end,
-        {noreply, Db3, hibernate_if_no_idle_limit()}
-    catch
-        throw: retry ->
-            [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
-            {noreply, Db, hibernate_if_no_idle_limit()}
-    end;
-handle_info({'EXIT', _Pid, normal}, Db) ->
-    {noreply, Db, idle_limit()};
-handle_info({'EXIT', _Pid, Reason}, Db) ->
-    {stop, Reason, Db};
-handle_info(timeout, #db{name=DbName} = Db) ->
-    IdleLimitMSec = update_idle_limit_from_config(),
-    case couch_db:is_idle(Db) of
-        true ->
-            LastActivity = couch_db_engine:last_activity(Db),
-            DtMSec = timer:now_diff(os:timestamp(), LastActivity) div 1000,
-            MSecSinceLastActivity = max(0, DtMSec),
-            case MSecSinceLastActivity > IdleLimitMSec of
-                true ->
-                    ok = couch_server:close_db_if_idle(DbName);
-                false ->
-                    ok
-            end;
-        false ->
-            ok
-    end,
-    % Send a message to wake up and then hibernate. Hibernation here is done to
-    % force a thorough garbage collection.
-    gen_server:cast(self(), wakeup),
-    {noreply, Db, hibernate};
-
-handle_info(Msg, Db) ->
-    case couch_db_engine:handle_db_updater_info(Msg, Db) of
-        {noreply, NewDb} ->
-            {noreply, NewDb, idle_limit()};
-        Else ->
-            Else
-    end.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-sort_and_tag_grouped_docs(Client, GroupedDocs) ->
-    % These groups should already be sorted but sometimes clients misbehave.
-    % The merge_updates function will fail and the database can end up with
-    % duplicate documents if the incoming groups are not sorted, so as a sanity
-    % check we sort them again here. See COUCHDB-2735.
-    Cmp = fun([#doc{id=A}|_], [#doc{id=B}|_]) -> A < B end,
-    lists:map(fun(DocGroup) ->
-        [{Client, maybe_tag_doc(D)} || D <- DocGroup]
-    end, lists:sort(Cmp, GroupedDocs)).
-
-maybe_tag_doc(#doc{id=Id, revs={Pos,[_Rev|PrevRevs]}, meta=Meta0}=Doc) ->
-    case lists:keymember(ref, 1, Meta0) of
-        true ->
-            Doc;
-        false ->
-            Key = {Id, {Pos-1, PrevRevs}},
-            Doc#doc{meta=[{ref, Key} | Meta0]}
-    end.
-
-merge_updates([[{_,#doc{id=X}}|_]=A|RestA], [[{_,#doc{id=X}}|_]=B|RestB]) ->
-    [A++B | merge_updates(RestA, RestB)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X < Y ->
-    [hd(A) | merge_updates(tl(A), B)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X > Y ->
-    [hd(B) | merge_updates(A, tl(B))];
-merge_updates([], RestB) ->
-    RestB;
-merge_updates(RestA, []) ->
-    RestA.
-
-collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts) ->
-    receive
-        % Only collect updates with the same MergeConflicts flag and without
-        % local docs. It's easier to just avoid multiple _local doc
-        % updaters than deal with their possible conflicts, and local docs
-        % writes are relatively rare. Can be optimized later if really needed.
-        {update_docs, Client, GroupedDocs, [], MergeConflicts} ->
-            GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
-            GroupedDocsAcc2 =
-                merge_updates(GroupedDocsAcc, GroupedDocs2),
-            collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
-                    MergeConflicts)
-    after 0 ->
-        {GroupedDocsAcc, ClientsAcc}
-    end.
-
-
-init_db(DbName, FilePath, EngineState, Options) ->
-    % convert start time tuple to microsecs and store as a binary string
-    {MegaSecs, Secs, MicroSecs} = os:timestamp(),
-    StartTime = ?l2b(io_lib:format("~p",
-            [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
-
-    BDU = couch_util:get_value(before_doc_update, Options, nil),
-    ADR = couch_util:get_value(after_doc_read, Options, nil),
-
-    NonCreateOpts = [Opt || Opt <- Options, Opt /= create],
-
-    InitDb = #db{
-        name = DbName,
-        filepath = FilePath,
-        engine = EngineState,
-        instance_start_time = StartTime,
-        options = NonCreateOpts,
-        before_doc_update = BDU,
-        after_doc_read = ADR
-    },
-
-    DbProps = couch_db_engine:get_props(InitDb),
-
-    InitDb#db{
-        committed_update_seq = couch_db_engine:get_update_seq(InitDb),
-        security = couch_db_engine:get_security(InitDb),
-        options = lists:keystore(props, 1, NonCreateOpts, {props, DbProps})
-    }.
-
-
-refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) ->
-    spawn(fabric, reset_validation_funs, [mem3:dbname(Name)]),
-    Db#db{validate_doc_funs = undefined};
-refresh_validate_doc_funs(Db0) ->
-    Db = Db0#db{user_ctx=?ADMIN_USER},
-    {ok, DesignDocs} = couch_db:get_design_docs(Db),
-    ProcessDocFuns = lists:flatmap(
-        fun(DesignDocInfo) ->
-            {ok, DesignDoc} = couch_db:open_doc_int(
-                Db, DesignDocInfo, [ejson_body]),
-            case couch_doc:get_validate_doc_fun(DesignDoc) of
-            nil -> [];
-            Fun -> [Fun]
-            end
-        end, DesignDocs),
-    Db#db{validate_doc_funs=ProcessDocFuns}.
-
-% rev tree functions
-
-flush_trees(_Db, [], AccFlushedTrees) ->
-    {ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{} = Db,
-        [InfoUnflushed | RestUnflushed], AccFlushed) ->
-    #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
-    {Flushed, FinalAcc} = couch_key_tree:mapfold(
-        fun(_Rev, Value, Type, SizesAcc) ->
-            case Value of
-                % This node is a document summary that needs to be
-                % flushed to disk.
-                #doc{} = Doc ->
-                    check_doc_atts(Db, Doc),
-                    ExternalSize = get_meta_body_size(Value#doc.meta),
-                    {size_info, AttSizeInfo} =
-                            lists:keyfind(size_info, 1, Doc#doc.meta),
-                    {ok, NewDoc, WrittenSize} =
-                            couch_db_engine:write_doc_body(Db, Doc),
-                    Leaf = #leaf{
-                        deleted = Doc#doc.deleted,
-                        ptr = NewDoc#doc.body,
-                        seq = UpdateSeq,
-                        sizes = #size_info{
-                            active = WrittenSize,
-                            external = ExternalSize
-                        },
-                        atts = AttSizeInfo
-                    },
-                    {Leaf, add_sizes(Type, Leaf, SizesAcc)};
-                #leaf{} ->
-                    {Value, add_sizes(Type, Value, SizesAcc)};
-                _ ->
-                    {Value, SizesAcc}
-            end
-        end, {0, 0, []}, Unflushed),
-    {FinalAS, FinalES, FinalAtts} = FinalAcc,
-    TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
-    NewInfo = InfoUnflushed#full_doc_info{
-        rev_tree = Flushed,
-        sizes = #size_info{
-            active = FinalAS + TotalAttSize,
-            external = FinalES + TotalAttSize
-        }
-    },
-    flush_trees(Db, RestUnflushed, [NewInfo | AccFlushed]).
-
-
-check_doc_atts(Db, Doc) ->
-    {atts_stream, Stream} = lists:keyfind(atts_stream, 1, Doc#doc.meta),
-    % Make sure that the attachments were written to the currently
-    % active attachment stream. If compaction swaps during a write
-    % request we may have to rewrite our attachment bodies.
-    if Stream == nil -> ok; true ->
-        case couch_db:is_active_stream(Db, Stream) of
-            true ->
-                ok;
-            false ->
-                % Stream where the attachments were written to is
-                % no longer the current attachment stream. This
-                % can happen when a database is switched at
-                % compaction time.
-                couch_log:debug("Stream where the attachments were"
-                                " written has changed."
-                                " Possibly retrying.", []),
-                throw(retry)
-        end
-    end.
-
-
-add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) ->
-    % Maybe upgrade from disk_size only
-    #size_info{
-        active = ActiveSize,
-        external = ExternalSize
-    } = upgrade_sizes(Sizes),
-    {ASAcc, ESAcc, AttsAcc} = Acc,
-    NewASAcc = ActiveSize + ASAcc,
-    NewESAcc = ESAcc + if Type == leaf -> ExternalSize; true -> 0 end,
-    NewAttsAcc = lists:umerge(AttSizes, AttsAcc),
-    {NewASAcc, NewESAcc, NewAttsAcc}.
-
-
-upgrade_sizes(#size_info{}=SI) ->
-    SI;
-upgrade_sizes({D, E}) ->
-    #size_info{active=D, external=E};
-upgrade_sizes(S) when is_integer(S) ->
-    #size_info{active=S, external=0}.
-
-
... 68054 lines suppressed ...

[couchdb] 07/24: Clean up couch_debug

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e909cdbb5d1ed30bc7bdfd8ac6b4af36ca1b9224
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 00:21:52 2021 -0400

    Clean up couch_debug
    
    Remove logic handling index directories and couch_files. This is definitely
    not a comprehensive cleanup; we should probably have a separate PR to pick
    out what would be useful for `main`.
---
 src/couch/src/couch_debug.erl | 38 --------------------------------------
 1 file changed, 38 deletions(-)

diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 290d095..80feb61 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -370,8 +370,6 @@ fold_tree(Tree, Acc, Fun) ->
 linked_processes_info(Pid, Info) ->
     link_tree(Pid, Info, fun(P, Props) -> {process_name(P), Props} end).
 
-print_linked_processes(couch_index_server) ->
-    print_couch_index_server_processes();
 print_linked_processes(Name) when is_atom(Name) ->
     case whereis(Name) of
         undefined -> {error, {unknown, Name}};
@@ -386,42 +384,6 @@ print_linked_processes(Pid) when is_pid(Pid) ->
     Tree = linked_processes_info(Pid, Info),
     print_tree(Tree, TableSpec).
 
-id("couch_file:init" ++ _, Pid, _Props) ->
-    case couch_file:process_info(Pid) of
-        {{file_descriptor, prim_file, {Port, Fd}}, FilePath} ->
-            term2str([
-                term2str(Fd), ":",
-                term2str(Port), ":",
-                shorten_path(FilePath)]);
-        undefined ->
-            ""
-    end;
-id(_IdStr, _Pid, _Props) ->
-    "".
-
-print_couch_index_server_processes() ->
-    Info = [reductions, message_queue_len, memory],
-    TableSpec = [
-        {50, left, name}, {12, centre, reductions},
-        {19, centre, message_queue_len}, {14, centre, memory}, {id}
-    ],
-
-    Tree = link_tree(whereis(couch_index_server), Info, fun(P, Props) ->
-        IdStr = process_name(P),
-        {IdStr, [{id, id(IdStr, P, Props)} | Props]}
-    end),
-    print_tree(Tree, TableSpec).
-
-shorten_path(Path) ->
-    ViewDir = list_to_binary(config:get("couchdb", "view_index_dir")),
-    DatabaseDir = list_to_binary(config:get("couchdb", "database_dir")),
-    File = list_to_binary(Path),
-    Len = max(
-        binary:longest_common_prefix([File, DatabaseDir]),
-        binary:longest_common_prefix([File, ViewDir])
-    ),
-    <<_:Len/binary, Rest/binary>> = File,
-    binary_to_list(Rest).
 
 %% Pretty print functions
 

[couchdb] 02/24: Clean up config files

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit ba6819b0086b2b80a62d8a094df889616d3744dc
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Tue Apr 13 19:38:23 2021 -0400

    Clean up config files
    
    Remove default.ini and local.ini sections related to applications and
    functionality which is no longer there.
    
    One odd change is the commented-out `partitioned` flag. The reason is that
    the "flags" logic is kept and appears to be functional, but the `partitioned`
    flag itself is not. `couch_flags` loads flags using `list_to_existing_atom/1`,
    and since `partitioned` is not an existing atom, it continuously emits crash
    logs from the `_up` endpoint related to that.
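    
    As a reference for the failure mode, here is a minimal sketch (illustrative
    only, not the actual couch_flags code; the module and function names are
    hypothetical). list_to_existing_atom/1 raises badarg for any name that is
    not already an atom in the VM, so a flag name that nothing else references
    cannot be looked up safely:
    
        %% flag_sketch.erl -- hypothetical module, not part of CouchDB
        -module(flag_sketch).
        -export([load_flag/1]).
        
        %% Convert a flag name to an atom only if that atom already exists
        %% in the VM; report unknown names instead of crashing with badarg.
        load_flag(Name) when is_list(Name) ->
            try list_to_existing_atom(Name) of
                Atom -> {ok, Atom}
            catch
                error:badarg -> {error, {unknown_flag, Name}}
            end.
    
    In a VM where the atom `partitioned` has never been referenced,
    flag_sketch:load_flag("partitioned") returns {error, {unknown_flag,
    "partitioned"}} instead of crashing, which is the kind of guard the
    current flag loading lacks.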
---
 rel/overlay/etc/default.ini | 215 +-------------------------------------------
 rel/overlay/etc/local.ini   |  13 ---
 2 files changed, 1 insertion(+), 227 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index dded4a9..58cd6a0 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -4,33 +4,17 @@ name = {{package_author_name}}
 
 [couchdb]
 uuid = {{uuid}}
-database_dir = {{data_dir}}
-view_index_dir = {{view_index_dir}}
 ; util_driver_dir =
 ; plugin_dir =
 os_process_timeout = 5000 ; 5 seconds. for view servers.
 max_dbs_open = 500
-; Method used to compress everything that is appended to database and view index files, except
-; for attachments (see the attachments section). Available methods are:
-;
-; none         - no compression
-; snappy       - use google snappy, a very fast compressor/decompressor
-; deflate_N    - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
-;                lowest compression ratio) to 9 (slowest, highest compression ratio)
-file_compression = snappy
-; Higher values may give better read performance due to less read operations
-; and/or more OS page cache hits, but they can also increase overall response
-; time for writes when there are many attachment write requests in parallel.
-attachment_stream_buffer_size = 4096
 ; Default security object for databases if not explicitly set
 ; everyone - same as couchdb 1.0, everyone can read/write
 ; admin_only - only admins can read/write
 ; admin_local - sharded dbs on :5984 are read/write for everyone,
 ;               local dbs on :5986 are read/write for admins only
 default_security = admin_only
-; btree_chunk_size = 1279
 ; maintenance_mode = false
-; stem_interactive_updates = true
 ; uri_file =
 ; The speed of processing the _changes feed with doc_ids filter can be
 ; influenced directly with this setting - increase for faster processing at the
@@ -64,68 +48,15 @@ max_document_size = 8000000 ; bytes
 ; Maximum attachment size.
 ; max_attachment_size = infinity
 ;
-; Do not update the least recently used DB cache on reads, only writes
-;update_lru_on_read = false
-;
-; The default storage engine to use when creating databases
-; is set as a key into the [couchdb_engines] section.
-default_engine = couch
-;
 ; Enable this to only "soft-delete" databases when DELETE /{db} requests are
 ; made. This will place a .recovery directory in your data directory and
 ; move deleted databases/shards there instead. You can then manually delete
 ; these files later, as desired.
 ;enable_database_recovery = false
 ;
-; Set the maximum size allowed for a partition. This helps users avoid
-; inadvertently abusing partitions resulting in hot shards. The default
-; is 10GiB. A value of 0 or less will disable partition size checks.
-;max_partition_size = 10737418240
-;
-; When true, system databases _users and _replicator are created immediately
-; on startup if not present.
-;single_node = false
-
 ; Allow edits on the _security object in the user db. By default, it's disabled.
 users_db_security_editable = false
 
-[purge]
-; Allowed maximum number of documents in one purge request
-;max_document_id_number = 100
-;
-; Allowed maximum number of accumulated revisions in one purge request
-;max_revisions_number = 1000
-;
-; Allowed durations when index is not updated for local purge checkpoint
-; document. Default is 24 hours.
-;index_lag_warn_seconds = 86400
-
-[couchdb_engines]
-; The keys in this section are the filename extension that
-; the specified engine module will use. This is important so
-; that couch_server is able to find an existing database without
-; having to ask every configured engine.
-couch = couch_bt_engine
-
-[process_priority]
-; Selectively disable altering process priorities for modules that request it.
-; * NOTE: couch_server priority has been shown to lead to CouchDB hangs and
-;     failures on Erlang releases 21.0 - 21.3.8.12 and 22.0 -> 22.2.4. Do not
-;     enable when running with those versions.
-;couch_server = false
-
-[cluster]
-q=2
-n=3
-; placement = metro-dc-a:2,metro-dc-b:1
-
-; Supply a comma-delimited list of node names that this node should
-; contact in order to join a cluster. If a seedlist is configured the ``_up``
-; endpoint will return a 404 until the node has successfully contacted at
-; least one of the members of the seedlist and replicated an up-to-date copy
-; of the ``_nodes``, ``_dbs``, and ``_users`` system databases.
-; seedlist = couchdb@node1.example.com,couchdb@node2.example.com
-
 [chttpd]
 ; These settings affect the main, clustered port (5984 by default).
 port = {{cluster_port}}
@@ -178,23 +109,6 @@ max_db_number_for_dbs_info_req = 100
 ; rsa:foo = -----BEGIN PUBLIC KEY-----\nMIIBIjAN...IDAQAB\n-----END PUBLIC KEY-----\n
 ; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
 
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-enable = false
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-delete_dbs = false
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-; prefix for user databases. If you change this after user dbs have been
-; created, the existing databases won't get deleted if the associated user
-; gets deleted because of the then prefix mismatch.
-database_prefix = userdb-
-
 [httpd]
 port = {{backend_port}}
 bind_address = 127.0.0.1
@@ -220,13 +134,6 @@ enable_xframe_options = false
 ; Maximum allowed http request size. Applies to both clustered and local port.
 max_http_request_size = 4294967296 ; 4GB
 
-; [httpd_design_handlers]
-; _view =
-
-; [ioq]
-; concurrency = 10
-; ratio = 0.01
-
 [ssl]
 port = 6984
 
@@ -238,23 +145,7 @@ port = 6984
 ; max_objects =
 ; max_size = 104857600
 
-; [mem3]
-; nodes_db = _nodes
-; shard_cache_size = 25000
-; shards_db = _dbs
-; sync_concurrency = 10
-
 ; [fabric]
-; all_docs_concurrency = 10
-; changes_duration =
-; shard_timeout_factor = 2
-; uuid_prefix_len = 7
-; request_timeout = 60000
-; all_docs_timeout = 10000
-; attachments_timeout = 60000
-; view_timeout = 3600000
-; partition_view_timeout = 3600000
-;
 ; Custom FDB directory prefix. All the nodes of the same CouchDB instance
 ; should have a matching directory prefix in order to read and write the same
 ; data. Changes to this value take effect only on node start-up.
@@ -279,26 +170,6 @@ port = 6984
 ; Bulk docs transaction batch size in bytes
 ;update_docs_batch_size = 2500000
 
-; [rexi]
-; buffer_count = 2000
-; server_per_node = true
-; stream_limit = 5
-;
-; Use a single message to kill a group of remote workers This is
-; mostly is an upgrade clause to allow operating in a mixed cluster of
-; 2.x and 3.x nodes. After upgrading switch to true to save some
-; network bandwidth
-;use_kill_all = false
-
-; [global_changes]
-; max_event_delay = 25
-; max_write_delay = 500
-; update_db = true
-
-; [view_updater]
-; min_writer_items = 100
-; min_writer_size = 16777216
-
 [couch_httpd_auth]
 ; WARNING! This only affects the node-local port (5986 by default).
 ; You probably want the settings under [chttpd].
@@ -440,7 +311,6 @@ os_process_limit = 100
 ; os_process_soft_limit = 100
 ; Timeout for how long a response from a busy view group server can take.
 ; "infinity" is also a valid configuration value.
-;group_info_timeout = 5000
 ;query_limit = 268435456
 ;partition_query_limit = 268435456
 
@@ -462,15 +332,12 @@ query = mango_eval
 ; the warning.
 ;index_scan_warning_threshold = 10
 
-[indexers]
-couch_mrview = true
-
 [feature_flags]
 ; This enables any database to be created as a partitioned database (except system dbs).
 ; Setting this to false will stop the creation of partitioned databases.
 ; partitioned||allowed* = true will scope the creation of partitioned databases
 ; to databases with 'allowed' prefix.
-partitioned||* = true
+; partitioned||* = true
 
 [uuids]
 ; Known algorithms:
@@ -699,86 +566,6 @@ writer = stderr
 ; Stats collection interval in seconds. Default 10 seconds.
 ;interval = 10
 
-[smoosh.ratio_dbs]
-min_priority = 2.0
-
-[smoosh.ratio_views]
-min_priority = 2.0
-
-[ioq]
-; The maximum number of concurrent in-flight IO requests
-concurrency = 10
-
-; The fraction of the time that a background IO request will be selected
-; over an interactive IO request when both queues are non-empty
-ratio = 0.01
-
-[ioq.bypass]
-; System administrators can choose to submit specific classes of IO directly
-; to the underlying file descriptor or OS process, bypassing the queues
-; altogether. Installing a bypass can yield higher throughput and lower
-; latency, but relinquishes some control over prioritization. The following
-; classes are recognized with the following defaults:
-
-; Messages on their way to an external process (e.g., couchjs) are bypassed
-os_process = true
-
-; Disk IO fulfilling interactive read requests is bypassed
-read = true
-
-; Disk IO required to update a database is bypassed
-write = true
-
-; Disk IO required to update views and other secondary indexes is bypassed
-view_update = true
-
-; Disk IO issued by the background replication processes that fix any
-; inconsistencies between shard copies is queued
-shard_sync = false
-
-; Disk IO issued by compaction jobs is queued
-compaction = false
-
-[dreyfus]
-; The name and location of the Clouseau Java service required to
-; enable Search functionality.
-; name = clouseau@127.0.0.1
-
-; CouchDB will try to re-connect to Clouseau using a bounded
-; exponential backoff with the following number of iterations.
-; retry_limit = 5
-
-; The default number of results returned from a global search query.
-; limit = 25
-
-; The default number of results returned from a search on a partition
-; of a database.
-; limit_partitions = 2000
-
-; The maximum number of results that can be returned from a global
-; search query (or any search query on a database without user-defined
-; partitions). Attempts to set ?limit=N higher than this value will
-; be rejected.
-; max_limit = 200
-
-; The maximum number of results that can be returned when searching
-; a partition of a database. Attempts to set ?limit=N higher than this
-; value will be rejected. If this config setting is not defined,
-; CouchDB will use the value of `max_limit` instead. If neither is
-; defined, the default is 2000 as stated here.
-; max_limit_partitions = 2000
-
-[reshard]
-;max_jobs = 48
-;max_history = 20
-;max_retries = 1
-;retry_interval_sec = 10
-;delete_source = true
-;update_shard_map_timeout_sec = 60
-;source_close_timeout_sec = 600
-;require_node_param = false
-;require_range_param = false
-
 [couch_jobs]
 ;
 ; Maximum jitter used when checking for active job timeouts
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index 2c9e899..b788e82 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -8,19 +8,6 @@
 ;max_document_size = 4294967296 ; bytes
 ;os_process_timeout = 5000
 
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-;enable = true
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-;delete_dbs = true
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-
 [chttpd]
 ;port = 5984
 ;bind_address = 127.0.0.1

[couchdb] 10/24: Remove most of the functionality from couch_server

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9e4fc195efd9b1e65ca30744a1e2cf168bf58fbf
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 00:46:31 2021 -0400

    Remove most of the functionality from couch_server
    
    Remove all the code related to opening and caching of databases.
    
    However, couch_server did a few other things such as:
    
     * Parse and maintain the CouchDB version
    
     * Return the server "uuid" value
    
     * Monitor config updates and hash admin passwords when they were
       updated
    
    It was a 50/50 decision to move that functionality out to other modules
    completely or keep it where it is. Since it wasn't just a single thing, and
    the overall PR was getting rather large, we opted to pare the existing code
    down to the minimum; later we can do another round of cleanup and find a
    better place for those features.
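
    As a rough illustration, this is what callers can still do after this
    commit -- a sketch based only on the exports kept in the patched module
    below (the "admin"/"s3cret" credentials are hypothetical):

        Version   = couch_server:get_version(),       %% full version string
        Short     = couch_server:get_version(short),  %% shortened variant
        Sha       = couch_server:get_git_sha(),       %% git sha of the build
        UUID      = couch_server:get_uuid(),          %% server instance uuid
        IsAdmin   = couch_server:is_admin("admin", "s3cret"),
        HasAdmins = couch_server:has_admins().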
---
 src/couch/src/couch_server.erl | 872 +----------------------------------------
 1 file changed, 16 insertions(+), 856 deletions(-)

diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index 42eab73..8fd074a 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -15,43 +15,18 @@
 -behaviour(config_listener).
 -vsn(3).
 
--export([open/2,create/2,delete/2,get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
--export([all_databases/0, all_databases/2]).
+-export([get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
 -export([init/1, handle_call/3,sup_start_link/0]).
--export([handle_cast/2,code_change/3,handle_info/2,terminate/2,format_status/2]).
--export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
--export([close_lru/0]).
--export([close_db_if_idle/1]).
--export([delete_compaction_files/1]).
--export([exists/1]).
--export([get_engine_extensions/0]).
--export([get_engine_path/2]).
--export([lock/2, unlock/1]).
+-export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
+-export([is_admin/2,has_admins/0]).
 
 % config_listener api
 -export([handle_config_change/5, handle_config_terminate/3]).
 
 -include_lib("couch/include/couch_db.hrl").
--include("couch_server_int.hrl").
 
--define(MAX_DBS_OPEN, 500).
 -define(RELISTEN_DELAY, 5000).
 
--record(server,{
-    root_dir = [],
-    engines = [],
-    max_dbs_open=?MAX_DBS_OPEN,
-    dbs_open=0,
-    start_time="",
-    update_lru_on_read=true,
-    lru = couch_lru:new()
-    }).
-
-dev_start() ->
-    couch:stop(),
-    up_to_date = make:all([load, debug_info]),
-    couch:start().
-
 get_version() ->
     ?COUCHDB_VERSION. %% Defined in rebar.config.script
 get_version(short) ->
@@ -70,138 +45,9 @@ get_uuid() ->
         UUID -> ?l2b(UUID)
     end.
 
-get_stats() ->
-    {ok, #server{start_time=Time,dbs_open=Open}} =
-            gen_server:call(couch_server, get_server),
-    [{start_time, ?l2b(Time)}, {dbs_open, Open}].
-
 sup_start_link() ->
     gen_server:start_link({local, couch_server}, couch_server, [], []).
 
-open(DbName, Options) ->
-    try
-        validate_open_or_create(DbName, Options),
-        open_int(DbName, Options)
-    catch throw:{?MODULE, Error} ->
-        Error
-    end.
-
-open_int(DbName, Options0) ->
-    Ctx = couch_util:get_value(user_ctx, Options0, #user_ctx{}),
-    case ets:lookup(couch_dbs, DbName) of
-    [#entry{db = Db0, lock = Lock} = Entry] when Lock =/= locked ->
-        update_lru(DbName, Entry#entry.db_options),
-        {ok, Db1} = couch_db:incref(Db0),
-        couch_db:set_user_ctx(Db1, Ctx);
-    _ ->
-        Options = maybe_add_sys_db_callbacks(DbName, Options0),
-        Timeout = couch_util:get_value(timeout, Options, infinity),
-        Create = couch_util:get_value(create_if_missing, Options, false),
-        case gen_server:call(couch_server, {open, DbName, Options}, Timeout) of
-        {ok, Db0} ->
-            {ok, Db1} = couch_db:incref(Db0),
-            couch_db:set_user_ctx(Db1, Ctx);
-        {not_found, no_db_file} when Create ->
-            couch_log:warning("creating missing database: ~s", [DbName]),
-            couch_server:create(DbName, Options);
-        Error ->
-            Error
-        end
-    end.
-
-update_lru(DbName, Options) ->
-    case config:get_boolean("couchdb", "update_lru_on_read", false) of
-        true ->
-            case lists:member(sys_db, Options) of
-                false -> gen_server:cast(couch_server, {update_lru, DbName});
-                true -> ok
-            end;
-        false ->
-            ok
-    end.
-
-close_lru() ->
-    gen_server:call(couch_server, close_lru).
-
-create(DbName, Options) ->
-    try
-        validate_open_or_create(DbName, Options),
-        create_int(DbName, Options)
-    catch throw:{?MODULE, Error} ->
-        Error
-    end.
-
-create_int(DbName, Options0) ->
-    Options = maybe_add_sys_db_callbacks(DbName, Options0),
-    couch_partition:validate_dbname(DbName, Options),
-    case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
-    {ok, Db0} ->
-        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-        {ok, Db1} = couch_db:incref(Db0),
-        couch_db:set_user_ctx(Db1, Ctx);
-    Error ->
-        Error
-    end.
-
-delete(DbName, Options) ->
-    gen_server:call(couch_server, {delete, DbName, Options}, infinity).
-
-
-exists(DbName) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    Engines = get_configured_engines(),
-    Possible = get_possible_engines(DbName, RootDir, Engines),
-    Possible /= [].
-
-
-delete_compaction_files(DbName) ->
-    delete_compaction_files(DbName, []).
-
-delete_compaction_files(DbName, DelOpts) when is_list(DbName) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    lists:foreach(fun({Ext, Engine}) ->
-        FPath = make_filepath(RootDir, DbName, Ext),
-        couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts)
-    end, get_configured_engines()),
-    ok;
-delete_compaction_files(DbName, DelOpts) when is_binary(DbName) ->
-    delete_compaction_files(?b2l(DbName), DelOpts).
-
-maybe_add_sys_db_callbacks(DbName, Options) when is_binary(DbName) ->
-    maybe_add_sys_db_callbacks(?b2l(DbName), Options);
-maybe_add_sys_db_callbacks(DbName, Options) ->
-    DbsDbName = config:get("mem3", "shards_db", "_dbs"),
-    NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
-
-    IsReplicatorDb = path_ends_with(DbName, "_replicator"),
-    UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
-    IsUsersDb = path_ends_with(DbName, "_users")
-        orelse path_ends_with(DbName, UsersDbSuffix),
-    if
-        DbName == DbsDbName ->
-            [sys_db | Options];
-        DbName == NodesDbName ->
-            [sys_db | Options];
-        IsReplicatorDb ->
-            [{before_doc_update, fun couch_replicator_docs:before_doc_update/3},
-             {after_doc_read, fun couch_replicator_docs:after_doc_read/2},
-             sys_db | Options];
-        IsUsersDb ->
-            [{before_doc_update, fun couch_users_db:before_doc_update/3},
-             {after_doc_read, fun couch_users_db:after_doc_read/2},
-             sys_db | Options];
-        true ->
-            Options
-    end.
-
-path_ends_with(Path, Suffix) when is_binary(Suffix) ->
-    Suffix =:= couch_db:dbname_suffix(Path);
-path_ends_with(Path, Suffix) when is_list(Suffix) ->
-    path_ends_with(Path, ?l2b(Suffix)).
-
-check_dbname(DbName) ->
-    couch_db:validate_dbname(DbName).
-
 is_admin(User, ClearPwd) ->
     case config:get("admins", User) of
     "-hashed-" ++ HashedPwdAndSalt ->
@@ -224,22 +70,9 @@ hash_admin_passwords(Persist) ->
             config:set("admins", User, ?b2l(HashedPassword), Persist)
         end, couch_passwords:get_unhashed_admins()).
 
-close_db_if_idle(DbName) ->
-    case ets:lookup(couch_dbs, DbName) of
-        [#entry{}] ->
-            gen_server:cast(couch_server, {close_db_if_idle, DbName});
-        [] ->
-            ok
-    end.
-
-
 init([]) ->
-    couch_util:set_mqd_off_heap(?MODULE),
-    couch_util:set_process_priority(?MODULE, high),
-
     % Mark being able to receive documents with an _access property as a supported feature
     config:enable_feature('access-ready'),
-
     % Mark if fips is enabled
     case
         erlang:function_exported(crypto, info_fips, 0) andalso
@@ -249,83 +82,28 @@ init([]) ->
         false ->
             ok
     end,
+    ok = config:listen_for_changes(?MODULE, nil),
+    hash_admin_passwords(),
+    {ok, nil}.
 
-    % read config and register for configuration changes
+handle_call(Msg, _From, Srv) ->
+    {stop, {bad_call, Msg}, Srv}.
 
-    % just stop if one of the config settings change. couch_server_sup
-    % will restart us and then we will pick up the new settings.
+handle_cast(Msg, Srv) ->
+    {stop, {bad_cast, Msg}, Srv}.
 
-    RootDir = config:get("couchdb", "database_dir", "."),
-    Engines = get_configured_engines(),
-    MaxDbsOpen = list_to_integer(
-            config:get("couchdb", "max_dbs_open", integer_to_list(?MAX_DBS_OPEN))),
-    UpdateLruOnRead =
-        config:get("couchdb", "update_lru_on_read", "false") =:= "true",
-    ok = config:listen_for_changes(?MODULE, nil),
-    ok = couch_file:init_delete_dir(RootDir),
-    hash_admin_passwords(),
-    ets:new(couch_dbs, [
-        set,
-        protected,
-        named_table,
-        {keypos, #entry.name},
-        {read_concurrency, true}
-    ]),
-    ets:new(couch_dbs_pid_to_name, [set, protected, named_table]),
-    ets:new(couch_dbs_locks, [
-        set,
-        public,
-        named_table,
-        {read_concurrency, true}
-    ]),
-    process_flag(trap_exit, true),
-    {ok, #server{root_dir=RootDir,
-                engines = Engines,
-                max_dbs_open=MaxDbsOpen,
-                update_lru_on_read=UpdateLruOnRead,
-                start_time=couch_util:rfc1123_date()}}.
+handle_info(Msg, Srv) ->
+    {stop, {unknown_message, Msg}, Srv}.
 
-terminate(Reason, Srv) ->
-    couch_log:error("couch_server terminating with ~p, state ~2048p",
-                    [Reason,
-                     Srv#server{lru = redacted}]),
-    ets:foldl(fun(#entry{db = Db}, _) ->
-        % Filter out any entry records for open_async
-        % processes that haven't finished.
-        if Db == undefined -> ok; true ->
-            couch_util:shutdown_sync(couch_db:get_pid(Db))
-        end
-    end, nil, couch_dbs),
-    ok.
+code_change(_OldVsn, Srv, _Extra) ->
+    {ok, Srv}.
 
-format_status(_Opt, [_PDict, Srv]) ->
-    Scrubbed = Srv#server{lru=couch_lru:sizes(Srv#server.lru)},
-    [{data, [{"State", ?record_to_keyval(server, Scrubbed)}]}].
+terminate(_Reason, _Srv) ->
+    ok.
 
-handle_config_change("couchdb", "database_dir", _, _, _) ->
-    exit(whereis(couch_server), config_change),
-    remove_handler;
-handle_config_change("couchdb", "update_lru_on_read", "true", _, _) ->
-    {ok, gen_server:call(couch_server,{set_update_lru_on_read,true})};
-handle_config_change("couchdb", "update_lru_on_read", _, _, _) ->
-    {ok, gen_server:call(couch_server,{set_update_lru_on_read,false})};
-handle_config_change("couchdb", "max_dbs_open", Max, _, _) when is_list(Max) ->
-    {ok, gen_server:call(couch_server,{set_max_dbs_open,list_to_integer(Max)})};
-handle_config_change("couchdb", "max_dbs_open", _, _, _) ->
-    {ok, gen_server:call(couch_server,{set_max_dbs_open,?MAX_DBS_OPEN})};
-handle_config_change("couchdb_engines", _, _, _, _) ->
-    {ok, gen_server:call(couch_server, reload_engines)};
 handle_config_change("admins", _, _, Persist, _) ->
     % spawn here so couch event manager doesn't deadlock
     {ok, spawn(fun() -> hash_admin_passwords(Persist) end)};
-handle_config_change("httpd", "authentication_handlers", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd", "bind_address", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd", "port", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd", "max_connections", _, _, _) ->
-    {ok, couch_httpd:stop()};
 handle_config_change(_, _, _, _, _) ->
     {ok, nil}.
 
@@ -333,621 +111,3 @@ handle_config_terminate(_, stop, _) ->
     ok;
 handle_config_terminate(_Server, _Reason, _State) ->
     erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-
-all_databases() ->
-    {ok, DbList} = all_databases(
-        fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []),
-    {ok, lists:usort(DbList)}.
-
-all_databases(Fun, Acc0) ->
-    {ok, #server{root_dir=Root}} = gen_server:call(couch_server, get_server),
-    NormRoot = couch_util:normpath(Root),
-    Extensions = get_engine_extensions(),
-    ExtRegExp = "(" ++ string:join(Extensions, "|") ++ ")",
-    RegExp =
-        "^[a-z0-9\\_\\$()\\+\\-]*" % stock CouchDB name regex
-        "(\\.[0-9]{10,})?"         % optional shard timestamp
-        "\\." ++ ExtRegExp ++ "$", % filename extension
-    FinalAcc = try
-    couch_util:fold_files(Root,
-        RegExp,
-        true,
-            fun(Filename, AccIn) ->
-                NormFilename = couch_util:normpath(Filename),
-                case NormFilename -- NormRoot of
-                [$/ | RelativeFilename] -> ok;
-                RelativeFilename -> ok
-                end,
-                Ext = filename:extension(RelativeFilename),
-                case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of
-                {ok, NewAcc} -> NewAcc;
-                {stop, NewAcc} -> throw({stop, Fun, NewAcc})
-                end
-            end, Acc0)
-    catch throw:{stop, Fun, Acc1} ->
-         Acc1
-    end,
-    {ok, FinalAcc}.
-
-
-make_room(Server, Options) ->
-    case lists:member(sys_db, Options) of
-        false -> maybe_close_lru_db(Server);
-        true -> {ok, Server}
-    end.
-
-maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
-        when NumOpen < MaxOpen ->
-    {ok, Server};
-maybe_close_lru_db(#server{lru=Lru}=Server) ->
-    case couch_lru:close(Lru) of
-        {true, NewLru} ->
-            {ok, db_closed(Server#server{lru = NewLru}, [])};
-        false ->
-            {error, all_dbs_active}
-    end.
-
-open_async(Server, From, DbName, Options) ->
-    NoLRUServer = Server#server{
-        lru = redacted
-    },
-    Parent = self(),
-    T0 = os:timestamp(),
-    Opener = spawn_link(fun() ->
-        Res = open_async_int(NoLRUServer, DbName, Options),
-        IsSuccess = case Res of
-            {ok, _} -> true;
-            _ -> false
-        end,
-        case IsSuccess andalso lists:member(create, Options) of
-            true ->
-                couch_event:notify(DbName, created);
-            false ->
-                ok
-        end,
-        gen_server:call(Parent, {open_result, DbName, Res}, infinity),
-        unlink(Parent),
-        case IsSuccess of
-            true ->
-                % Track latency times for successful opens
-                Diff = timer:now_diff(os:timestamp(), T0) / 1000,
-                couch_stats:update_histogram([couchdb, db_open_time], Diff);
-            false ->
-                % Log unsuccessful open results
-                couch_log:info("open_result error ~p for ~s", [Res, DbName])
-        end
-    end),
-    ReqType = case lists:member(create, Options) of
-        true -> create;
-        false -> open
-    end,
-    true = ets:insert(couch_dbs, #entry{
-        name = DbName,
-        pid = Opener,
-        lock = locked,
-        waiters = [From],
-        req_type = ReqType,
-        db_options = Options
-    }),
-    true = ets:insert(couch_dbs_pid_to_name, {Opener, DbName}),
-    db_opened(Server, Options).
-
-open_async_int(Server, DbName, Options) ->
-    DbNameList = binary_to_list(DbName),
-    case check_dbname(DbNameList) of
-        ok ->
-            case get_engine(Server, DbNameList, Options) of
-                {ok, {Module, FilePath}} ->
-                    couch_db:start_link(Module, DbName, FilePath, Options);
-                Error2 ->
-                    Error2
-            end;
-        Error1 ->
-            Error1
-    end.
-
-handle_call(close_lru, _From, #server{lru=Lru} = Server) ->
-    case couch_lru:close(Lru) of
-        {true, NewLru} ->
-            {reply, ok, db_closed(Server#server{lru = NewLru}, [])};
-        false ->
-            {reply, {error, all_dbs_active}, Server}
-    end;
-handle_call(open_dbs_count, _From, Server) ->
-    {reply, Server#server.dbs_open, Server};
-handle_call({set_update_lru_on_read, UpdateOnRead}, _From, Server) ->
-    {reply, ok, Server#server{update_lru_on_read=UpdateOnRead}};
-handle_call({set_max_dbs_open, Max}, _From, Server) ->
-    {reply, ok, Server#server{max_dbs_open=Max}};
-handle_call(reload_engines, _From, Server) ->
-    {reply, ok, Server#server{engines = get_configured_engines()}};
-handle_call(get_server, _From, Server) ->
-    {reply, {ok, Server}, Server};
-handle_call({open_result, DbName, {ok, Db}}, {Opener, _}, Server) ->
-    true = ets:delete(couch_dbs_pid_to_name, Opener),
-    DbPid = couch_db:get_pid(Db),
-    case ets:lookup(couch_dbs, DbName) of
-        [] ->
-            % db was deleted during async open
-            exit(DbPid, kill),
-            {reply, ok, Server};
-        [#entry{pid = Opener, req_type = ReqType, waiters = Waiters} = Entry] ->
-            link(DbPid),
-            [gen_server:reply(Waiter, {ok, Db}) || Waiter <- Waiters],
-            % Cancel the creation request if it exists.
-            case ReqType of
-                {create, DbName, _Options, CrFrom} ->
-                    gen_server:reply(CrFrom, file_exists);
-                _ ->
-                    ok
-            end,
-            true = ets:insert(couch_dbs, #entry{
-                name = DbName,
-                db = Db,
-                pid = DbPid,
-                lock = unlocked,
-                db_options = Entry#entry.db_options,
-                start_time = couch_db:get_instance_start_time(Db)
-            }),
-            true = ets:insert(couch_dbs_pid_to_name, {DbPid, DbName}),
-            Lru = case couch_db:is_system_db(Db) of
-                false ->
-                    couch_lru:insert(DbName, Server#server.lru);
-                true ->
-                    Server#server.lru
-            end,
-            {reply, ok, Server#server{lru = Lru}};
-        [#entry{}] ->
-            % A mismatched opener pid means that this open_result message
-            % was in our mailbox but is now stale. Mostly ignore
-            % it except to ensure that the db pid is super dead.
-            exit(couch_db:get_pid(Db), kill),
-            {reply, ok, Server}
-    end;
-handle_call({open_result, DbName, {error, eexist}}, From, Server) ->
-    handle_call({open_result, DbName, file_exists}, From, Server);
-handle_call({open_result, DbName, Error}, {Opener, _}, Server) ->
-    case ets:lookup(couch_dbs, DbName) of
-        [] ->
-            % db was deleted during async open
-            {reply, ok, Server};
-        [#entry{pid = Opener, req_type = ReqType, waiters = Waiters} = Entry] ->
-            [gen_server:reply(Waiter, Error) || Waiter <- Waiters],
-            true = ets:delete(couch_dbs, DbName),
-            true = ets:delete(couch_dbs_pid_to_name, Opener),
-            NewServer = case ReqType of
-                {create, DbName, Options, CrFrom} ->
-                    open_async(Server, CrFrom, DbName, Options);
-                _ ->
-                    Server
-            end,
-            {reply, ok, db_closed(NewServer, Entry#entry.db_options)};
-        [#entry{}] ->
-            % A mismatched pid means that this open_result message
-            % was in our mailbox and is now stale. Ignore it.
-            {reply, ok, Server}
-    end;
-handle_call({open, DbName, Options}, From, Server) ->
-    case ets:lookup(couch_dbs, DbName) of
-    [] ->
-        case make_room(Server, Options) of
-        {ok, Server2} ->
-            {noreply, open_async(Server2, From, DbName, Options)};
-        CloseError ->
-            {reply, CloseError, Server}
-        end;
-    [#entry{waiters = Waiters} = Entry] when is_list(Waiters) ->
-        true = ets:insert(couch_dbs, Entry#entry{waiters = [From | Waiters]}),
-        NumWaiters = length(Waiters),
-        if NumWaiters =< 10 orelse NumWaiters rem 10 /= 0 -> ok; true ->
-            Fmt = "~b clients waiting to open db ~s",
-            couch_log:info(Fmt, [length(Waiters), DbName])
-        end,
-        {noreply, Server};
-    [#entry{db = Db}] ->
-        {reply, {ok, Db}, Server}
-    end;
-handle_call({create, DbName, Options}, From, Server) ->
-    case ets:lookup(couch_dbs, DbName) of
-    [] ->
-        case make_room(Server, Options) of
-        {ok, Server2} ->
-            CrOptions = [create | Options],
-            {noreply, open_async(Server2, From, DbName, CrOptions)};
-        CloseError ->
-            {reply, CloseError, Server}
-        end;
-    [#entry{req_type = open} = Entry] ->
-        % We're trying to create a database while someone is in
-        % the middle of trying to open it. We allow one creator
-        % to wait while we figure out if it'll succeed.
-        CrOptions = [create | Options],
-        Req = {create, DbName, CrOptions, From},
-        true = ets:insert(couch_dbs, Entry#entry{req_type = Req}),
-        {noreply, Server};
-    [_AlreadyRunningDb] ->
-        {reply, file_exists, Server}
-    end;
-handle_call({delete, DbName, Options}, _From, Server) ->
-    DbNameList = binary_to_list(DbName),
-    case check_dbname(DbNameList) of
-    ok ->
-        Server2 =
-        case ets:lookup(couch_dbs, DbName) of
-        [] -> Server;
-        [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) ->
-            true = ets:delete(couch_dbs, DbName),
-            true = ets:delete(couch_dbs_pid_to_name, Pid),
-            exit(Pid, kill),
-            [gen_server:reply(Waiter, not_found) || Waiter <- Waiters],
-            db_closed(Server, Entry#entry.db_options);
-        [#entry{pid = Pid} = Entry] ->
-            true = ets:delete(couch_dbs, DbName),
-            true = ets:delete(couch_dbs_pid_to_name, Pid),
-            exit(Pid, kill),
-            db_closed(Server, Entry#entry.db_options)
-        end,
-
-        couch_db_plugin:on_delete(DbName, Options),
-
-        DelOpt = [{context, delete} | Options],
-
-        % Make sure and remove all compaction data
-        delete_compaction_files(DbNameList, Options),
-
-        {ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
-        RootDir = Server#server.root_dir,
-        case couch_db_engine:delete(Engine, RootDir, FilePath, DelOpt) of
-        ok ->
-            couch_event:notify(DbName, deleted),
-            {reply, ok, Server2};
-        {error, enoent} ->
-            {reply, not_found, Server2};
-        Else ->
-            {reply, Else, Server2}
-        end;
-    Error ->
-        {reply, Error, Server}
-    end;
-handle_call({db_updated, Db}, _From, Server0) ->
-    DbName = couch_db:name(Db),
-    StartTime = couch_db:get_instance_start_time(Db),
-    Server = try ets:lookup_element(couch_dbs, DbName, #entry.start_time) of
-        StartTime ->
-            true = ets:update_element(couch_dbs, DbName, {#entry.db, Db}),
-            Lru = case couch_db:is_system_db(Db) of
-                false -> couch_lru:update(DbName, Server0#server.lru);
-                true -> Server0#server.lru
-            end,
-            Server0#server{lru = Lru};
-        _ ->
-            Server0
-    catch _:_ ->
-        Server0
-    end,
-    {reply, ok, Server}.
-
-handle_cast({update_lru, DbName}, #server{lru = Lru, update_lru_on_read=true} = Server) ->
-    {noreply, Server#server{lru = couch_lru:update(DbName, Lru)}};
-handle_cast({update_lru, _DbName}, Server) ->
-    {noreply, Server};
-handle_cast({close_db_if_idle, DbName}, Server) ->
-    case ets:update_element(couch_dbs, DbName, {#entry.lock, locked}) of
-    true ->
-        [#entry{db = Db, db_options = DbOpts}] = ets:lookup(couch_dbs, DbName),
-        case couch_db:is_idle(Db) of
-        true ->
-            DbPid = couch_db:get_pid(Db),
-            true = ets:delete(couch_dbs, DbName),
-            true = ets:delete(couch_dbs_pid_to_name, DbPid),
-            exit(DbPid, kill),
-            {noreply, db_closed(Server, DbOpts)};
-        false ->
-            true = ets:update_element(
-                     couch_dbs, DbName, {#entry.lock, unlocked}),
-            {noreply, Server}
-        end;
-    false ->
-        {noreply, Server}
-    end;
-
-handle_cast(Msg, Server) ->
-    {stop, {unknown_cast_message, Msg}, Server}.
-
-code_change(_OldVsn, #server{}=State, _Extra) ->
-    {ok, State}.
-
-handle_info({'EXIT', _Pid, config_change}, Server) ->
-    {stop, config_change, Server};
-handle_info({'EXIT', Pid, Reason}, Server) ->
-    case ets:lookup(couch_dbs_pid_to_name, Pid) of
-    [{Pid, DbName}] ->
-        [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs, DbName),
-        if Reason /= snappy_nif_not_loaded -> ok; true ->
-            Msg = io_lib:format("To open the database `~s`, Apache CouchDB "
-                "must be built with Erlang OTP R13B04 or higher.", [DbName]),
-            couch_log:error(Msg, [])
-        end,
-        % We kill databases on purpose so there's no reason
-        % to log that fact. So we restrict logging to "interesting"
-        % reasons.
-        if Reason == normal orelse Reason == killed -> ok; true ->
-            couch_log:info("db ~s died with reason ~p", [DbName, Reason])
-        end,
-        if not is_list(Waiters) -> ok; true ->
-            [gen_server:reply(Waiter, Reason) || Waiter <- Waiters]
-        end,
-        true = ets:delete(couch_dbs, DbName),
-        true = ets:delete(couch_dbs_pid_to_name, Pid),
-        {noreply, db_closed(Server, Entry#entry.db_options)};
-    [] ->
-        {noreply, Server}
-    end;
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info(Info, Server) ->
-    {stop, {unknown_message, Info}, Server}.
-
-db_opened(Server, Options) ->
-    case lists:member(sys_db, Options) of
-        false -> Server#server{dbs_open=Server#server.dbs_open + 1};
-        true -> Server
-    end.
-
-db_closed(Server, Options) ->
-    case lists:member(sys_db, Options) of
-        false -> Server#server{dbs_open=Server#server.dbs_open - 1};
-        true -> Server
-    end.
-
-validate_open_or_create(DbName, Options) ->
-    case check_dbname(DbName) of
-        ok ->
-            ok;
-        DbNameError ->
-            throw({?MODULE, DbNameError})
-    end,
-
-    case check_engine(Options) of
-        ok ->
-            ok;
-        EngineError ->
-            throw({?MODULE, EngineError})
-    end,
-
-    case ets:lookup(couch_dbs_locks, DbName) of
-        [] ->
-            ok;
-        [{DbName, Reason}] ->
-            throw({?MODULE, {error, {locked, Reason}}})
-    end.
-
-get_configured_engines() ->
-    ConfigEntries = config:get("couchdb_engines"),
-    Engines = lists:flatmap(fun({Extension, ModuleStr}) ->
-        try
-            [{Extension, list_to_atom(ModuleStr)}]
-        catch _T:_R ->
-            []
-        end
-    end, ConfigEntries),
-    case Engines of
-        [] ->
-            [{"couch", couch_bt_engine}];
-        Else ->
-            Else
-    end.
-
-
-get_engine(Server, DbName, Options) ->
-    #server{
-        root_dir = RootDir,
-        engines = Engines
-    } = Server,
-    case couch_util:get_value(engine, Options) of
-        Ext when is_binary(Ext) ->
-            ExtStr = binary_to_list(Ext),
-            case lists:keyfind(ExtStr, 1, Engines) of
-                {ExtStr, Engine} ->
-                    Path = make_filepath(RootDir, DbName, ExtStr),
-                    {ok, {Engine, Path}};
-                false ->
-                    {error, {invalid_engine_extension, Ext}}
-            end;
-        _ ->
-            get_engine(Server, DbName)
-    end.
-
-
-get_engine(Server, DbName) ->
-    #server{
-        root_dir = RootDir,
-        engines = Engines
-    } = Server,
-    Possible = get_possible_engines(DbName, RootDir, Engines),
-    case Possible of
-        [] ->
-            get_default_engine(Server, DbName);
-        [Engine] ->
-            {ok, Engine};
-        _ ->
-            erlang:error(engine_conflict)
-    end.
-
-
-get_possible_engines(DbName, RootDir, Engines) ->
-    lists:foldl(fun({Extension, Engine}, Acc) ->
-        Path = make_filepath(RootDir, DbName, Extension),
-        case couch_db_engine:exists(Engine, Path) of
-            true ->
-                [{Engine, Path} | Acc];
-            false ->
-                Acc
-        end
-    end, [], Engines).
-
-
-get_default_engine(Server, DbName) ->
-    #server{
-        root_dir = RootDir,
-        engines = Engines
-    } = Server,
-    Default = {couch_bt_engine, make_filepath(RootDir, DbName, "couch")},
-    case config:get("couchdb", "default_engine") of
-        Extension when is_list(Extension) ->
-            case lists:keyfind(Extension, 1, Engines) of
-                {Extension, Module} ->
-                    {ok, {Module, make_filepath(RootDir, DbName, Extension)}};
-                false ->
-                    Fmt = "Invalid storage engine extension ~s,"
-                            " configured engine extensions are: ~s",
-                    Exts = [E || {E, _} <- Engines],
-                    Args = [Extension, string:join(Exts, ", ")],
-                    couch_log:error(Fmt, Args),
-                    {ok, Default}
-            end;
-        _ ->
-            {ok, Default}
-    end.
-
-
-make_filepath(RootDir, DbName, Extension) when is_binary(RootDir) ->
-    make_filepath(binary_to_list(RootDir), DbName, Extension);
-make_filepath(RootDir, DbName, Extension) when is_binary(DbName) ->
-    make_filepath(RootDir, binary_to_list(DbName), Extension);
-make_filepath(RootDir, DbName, Extension) when is_binary(Extension) ->
-    make_filepath(RootDir, DbName, binary_to_list(Extension));
-make_filepath(RootDir, DbName, Extension) ->
-    filename:join([RootDir, "./" ++ DbName ++ "." ++ Extension]).
-
-
-get_engine_extensions() ->
-    case config:get("couchdb_engines") of
-        [] ->
-            ["couch"];
-        Entries ->
-            [Ext || {Ext, _Mod} <- Entries]
-    end.
-
-
-check_engine(Options) ->
-    case couch_util:get_value(engine, Options) of
-        Ext when is_binary(Ext) ->
-            ExtStr = binary_to_list(Ext),
-            Extensions = get_engine_extensions(),
-            case lists:member(ExtStr, Extensions) of
-                true ->
-                    ok;
-                false ->
-                    {error, {invalid_engine_extension, Ext}}
-            end;
-        _ ->
-            ok
-    end.
-
-
-get_engine_path(DbName, Engine) when is_binary(DbName), is_atom(Engine) ->
-    RootDir = config:get("couchdb", "database_dir", "."),
-    case lists:keyfind(Engine, 2, get_configured_engines()) of
-        {Ext, Engine} ->
-            {ok, make_filepath(RootDir, DbName, Ext)};
-        false ->
-            {error, {invalid_engine, Engine}}
-    end.
-
-lock(DbName, Reason) when is_binary(DbName), is_binary(Reason) ->
-    case ets:lookup(couch_dbs, DbName) of
-        [] ->
-            true = ets:insert(couch_dbs_locks, {DbName, Reason}),
-            ok;
-        [#entry{}] ->
-            {error, already_opened}
-    end.
-
-unlock(DbName) when is_binary(DbName) ->
-    true = ets:delete(couch_dbs_locks, DbName),
-    ok.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
-    ok = meck:new(config, [passthrough]),
-    ok = meck:expect(config, get, fun config_get/3),
-    ok.
-
-teardown_all(_) ->
-    meck:unload().
-
-config_get("couchdb", "users_db_suffix", _) -> "users_db";
-config_get(_, _, _) -> undefined.
-
-maybe_add_sys_db_callbacks_pass_test_() ->
-    {
-        setup,
-        fun setup_all/0,
-        fun teardown_all/1,
-        [
-            fun should_add_sys_db_callbacks/0,
-            fun should_not_add_sys_db_callbacks/0
-        ]
-    }.
-
-should_add_sys_db_callbacks() ->
-    Cases = [
-        "shards/00000000-3fffffff/foo/users_db.1415960794.couch",
-        "shards/00000000-3fffffff/foo/users_db.1415960794",
-        "shards/00000000-3fffffff/foo/users_db",
-        "shards/00000000-3fffffff/users_db.1415960794.couch",
-        "shards/00000000-3fffffff/users_db.1415960794",
-        "shards/00000000-3fffffff/users_db",
-
-        "shards/00000000-3fffffff/_users.1415960794.couch",
-        "shards/00000000-3fffffff/_users.1415960794",
-        "shards/00000000-3fffffff/_users",
-
-        "foo/users_db.couch",
-        "foo/users_db",
-        "users_db.couch",
-        "users_db",
-        "foo/_users.couch",
-        "foo/_users",
-        "_users.couch",
-        "_users",
-
-        "shards/00000000-3fffffff/foo/_replicator.1415960794.couch",
-        "shards/00000000-3fffffff/foo/_replicator.1415960794",
-        "shards/00000000-3fffffff/_replicator",
-        "foo/_replicator.couch",
-        "foo/_replicator",
-        "_replicator.couch",
-        "_replicator"
-    ],
-    lists:foreach(fun(DbName) ->
-        check_case(DbName, true),
-        check_case(?l2b(DbName), true)
-    end, Cases).
-
-should_not_add_sys_db_callbacks() ->
-    Cases = [
-        "shards/00000000-3fffffff/foo/mydb.1415960794.couch",
-        "shards/00000000-3fffffff/foo/mydb.1415960794",
-        "shards/00000000-3fffffff/mydb",
-        "foo/mydb.couch",
-        "foo/mydb",
-        "mydb.couch",
-        "mydb"
-    ],
-    lists:foreach(fun(DbName) ->
-        check_case(DbName, false),
-        check_case(?l2b(DbName), false)
-    end, Cases).
-
-check_case(DbName, IsAdded) ->
-    Options = maybe_add_sys_db_callbacks(DbName, [other_options]),
-    ?assertEqual(IsAdded, lists:member(sys_db, Options)).
-
--endif.

[couchdb] 11/24: Update couch_(js_)os_process after ioq removal

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit cc32e04cdf6357079b13a61935a6f702061965e5
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 01:36:55 2021 -0400

    Update couch_(js_)os_process after ioq removal
    
    Remove ioq call from `couch_os_process:prompt/2` and
    `couch_js_os_process:prompt/2`
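
    In short, a prompt now goes straight to the OS-process gen_server instead
    of being prioritized by ioq -- a sketch restating the one-line change in
    both modules:

        %% Before: routed through ioq using the caller's io_priority:
        %%   ioq:call(Pid, {prompt, Data}, erlang:get(io_priority))
        %% After: a direct call with no queueing and no request timeout:
        %%   gen_server:call(Pid, {prompt, Data}, infinity)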
---
 src/couch/src/couch_os_process.erl       | 2 +-
 src/couch_js/src/couch_js_os_process.erl | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/couch/src/couch_os_process.erl b/src/couch/src/couch_os_process.erl
index 63a2414..e17782e 100644
--- a/src/couch/src/couch_os_process.erl
+++ b/src/couch/src/couch_os_process.erl
@@ -51,7 +51,7 @@ send(Pid, Data) ->
     gen_server:cast(Pid, {send, Data}).
 
 prompt(Pid, Data) ->
-    case ioq:call(Pid, {prompt, Data}, erlang:get(io_priority)) of
+    case gen_server:call(Pid, {prompt, Data}, infinity) of
         {ok, Result} ->
             Result;
         Error ->
diff --git a/src/couch_js/src/couch_js_os_process.erl b/src/couch_js/src/couch_js_os_process.erl
index a453d1a..883bc8f 100644
--- a/src/couch_js/src/couch_js_os_process.erl
+++ b/src/couch_js/src/couch_js_os_process.erl
@@ -51,7 +51,7 @@ send(Pid, Data) ->
     gen_server:cast(Pid, {send, Data}).
 
 prompt(Pid, Data) ->
-    case ioq:call(Pid, {prompt, Data}, erlang:get(io_priority)) of
+    case gen_server:call(Pid, {prompt, Data}, infinity) of
         {ok, Result} ->
             Result;
         Error ->

[couchdb] 04/24: Update couch_primary_sup to not start couch_task_status child

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 1c3ed04887d1a5b4f2775d5391000bc4482069e5
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 00:08:18 2021 -0400

    Update couch_primary_sup to not start couch_task_status child
    
    `couch_task_status` has been removed; the `_active_tasks` endpoint is now
    driven by the `couch_jobs` application.
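
    For reference, the deleted child spec annotated field by field (the
    old-style six-tuple form used by this supervisor):

        {couch_task_status,                      %% Id
         {couch_task_status, start_link, []},    %% StartFunc: {M, F, A}
         permanent,                              %% Restart strategy
         brutal_kill,                            %% Shutdown: kill immediately
         worker,                                 %% Child type
         [couch_task_status]}                    %% Modules, for code upgrades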
---
 src/couch/src/couch_primary_sup.erl | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/couch/src/couch_primary_sup.erl b/src/couch/src/couch_primary_sup.erl
index dc2d9e5..b6d370f 100644
--- a/src/couch/src/couch_primary_sup.erl
+++ b/src/couch/src/couch_primary_sup.erl
@@ -25,12 +25,6 @@ init([]) ->
             infinity,
             supervisor,
             [couch_drv]},
-        {couch_task_status,
-            {couch_task_status, start_link, []},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_task_status]},
         {couch_server,
             {couch_server, sup_start_link, []},
             permanent,

[couchdb] 16/24: Remove clouseau and dreyfus references from mango

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3080cf561c1d120a01e418a2acecfaa78dc182d0
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 02:03:36 2021 -0400

    Remove clouseau and dreyfus references from mango
    
    Tried to minimize changes and cheated a bit by returning `false` from
    `is_text_service_available()`, keeping in mind that we'll probably want
    this functionality back in the future.
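
    The effect is that both the cursor and index module lookups now fail
    uniformly for text indexes, and feature detection reports the service as
    absent -- a sketch of the patched clauses:

        cursor_mod(#idx{type = <<"text">>}) ->
            ?MANGO_ERROR({index_service_unavailable, <<"text">>}).

        is_text_service_available() ->
            false.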
---
 src/mango/src/mango_cursor.erl |  8 --------
 src/mango/src/mango_idx.erl    | 17 +++--------------
 2 files changed, 3 insertions(+), 22 deletions(-)

diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index 63b449c..ed35817 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -30,18 +30,10 @@
 -include("mango_idx.hrl").
 
 
--ifdef(HAVE_DREYFUS).
 -define(CURSOR_MODULES, [
     mango_cursor_view,
-    mango_cursor_text,
     mango_cursor_special
 ]).
--else.
--define(CURSOR_MODULES, [
-    mango_cursor_view,
-    mango_cursor_special
-]).
--endif.
 
 -define(SUPERVISOR, mango_cursor_sup).
 
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index 37b6e03..7108ae6 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -263,12 +263,7 @@ cursor_mod(#idx{type = <<"json">>}) ->
 cursor_mod(#idx{def = all_docs, type= <<"special">>}) ->
     mango_cursor_special;
 cursor_mod(#idx{type = <<"text">>}) ->
-    case clouseau_rpc:connected() of
-        true ->
-            mango_cursor_text;
-        false ->
-            ?MANGO_ERROR({index_service_unavailable, <<"text">>})
-    end.
+    ?MANGO_ERROR({index_service_unavailable, <<"text">>}).
 
 
 idx_mod(#idx{type = <<"json">>}) ->
@@ -276,12 +271,7 @@ idx_mod(#idx{type = <<"json">>}) ->
 idx_mod(#idx{type = <<"special">>}) ->
     mango_idx_special;
 idx_mod(#idx{type = <<"text">>}) ->
-    case clouseau_rpc:connected() of
-        true ->
-            mango_idx_text;
-        false ->
-            ?MANGO_ERROR({index_service_unavailable, <<"text">>})
-    end.
+    ?MANGO_ERROR({index_service_unavailable, <<"text">>}).
 
 
 db_to_name(Name) when is_binary(Name) ->
@@ -318,8 +308,7 @@ get_idx_type(Opts) ->
 
 
 is_text_service_available() ->
-    erlang:function_exported(clouseau_rpc, connected, 0) andalso
-        clouseau_rpc:connected().
+    false.
 
 
 get_idx_ddoc(Idx, Opts) ->

[couchdb] 23/24: Update ./dev/run to not auto-create _global_changes

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3f9894fcc278438e9459339b88806f0f7c4478c3
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 03:05:02 2021 -0400

    Update ./dev/run to not auto-create _global_changes
---
 dev/run | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev/run b/dev/run
index 9aa95ca..3fe1b33 100755
--- a/dev/run
+++ b/dev/run
@@ -794,7 +794,7 @@ def try_request(
 
 
 def create_system_databases(host, port):
-    for dbname in ["_users", "_replicator", "_global_changes"]:
+    for dbname in ["_users", "_replicator"]:
         conn = httpclient.HTTPConnection(host, port)
         conn.request("HEAD", "/" + dbname)
         resp = conn.getresponse()

[couchdb] 14/24: Remove couch_db_plugin from couch_db_epi services

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c9e19fba34060ea318d96b9787a71ba16174fee7
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 01:48:44 2021 -0400

    Remove couch_db_plugin from couch_db_epi services
    
    It was replaced by fabric2_db_plugin
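
    The resulting services list, per the diff below, now carries only the
    feature flags service:

        services() ->
            [
                {feature_flags, couch_flags}
            ].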
---
 src/couch/src/couch_db_epi.erl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/couch/src/couch_db_epi.erl b/src/couch/src/couch_db_epi.erl
index 21879f6..bfd435a 100644
--- a/src/couch/src/couch_db_epi.erl
+++ b/src/couch/src/couch_db_epi.erl
@@ -35,7 +35,6 @@ providers() ->
 
 services() ->
     [
-        {couch_db, couch_db_plugin},
         {feature_flags, couch_flags}
     ].
 

[couchdb] 21/24: Update all the applications to use the new couch_views utility functions

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f005aba961d7a99128b54e8758b1ebd78bfa68c1
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 02:51:57 2021 -0400

    Update all the applications to use the new couch_views utility functions
    
    This is mostly a bulk search and replace to update fabric, couch_views, chttpd,
    mango and couch_replicator to use either the new include file or the new
    utility functions in couch_views.
    
    The `couch_views_http:transform_row/2` function was brought over from the
    removed `fabric_view` module. It's used in only one place, so it was copied
    there directly.
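
    A rough old -> new mapping, compiled from the hunks below (not
    exhaustive):

        couch_mrview_util:get_view_queries/1     -> couch_views_util:get_view_queries/1
        couch_mrview_util:get_view_keys/1        -> couch_views_util:get_view_keys/1
        couch_mrview_util:set_view_type/3        -> couch_views_util:set_view_type/3
        couch_mrview_util:ddoc_to_mrst/2         -> couch_views_util:ddoc_to_mrst/2
        couch_mrview_util:extract_view/4         -> couch_views_util:extract_view/4
        couch_mrview_util:validate_args/1        -> couch_views_validate:validate_args/1
        couch_mrview_http:parse_params/2         -> couch_views_http_util:parse_params/2
        couch_mrview_http:parse_body_and_query/3 -> couch_views_http_util:parse_body_and_query/3
        couch_mrview_http:prepend_val/1          -> couch_views_http_util:prepend_val/1
        fabric_view:transform_row                -> couch_views_http:transform_row
        couch_mrview include file                -> couch_views/include/couch_views.hrl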
---
 src/chttpd/src/chttpd_changes.erl                  |  2 +-
 src/chttpd/src/chttpd_db.erl                       | 12 ++++----
 src/chttpd/src/chttpd_misc.erl                     | 12 ++++----
 src/chttpd/src/chttpd_show.erl                     |  2 +-
 src/chttpd/src/chttpd_view.erl                     | 24 ++++++++--------
 .../src/couch_replicator_api_wrap.erl              |  2 +-
 src/couch_views/src/couch_views.erl                |  7 ++---
 src/couch_views/src/couch_views_batch.erl          |  2 +-
 src/couch_views/src/couch_views_batch_impl.erl     |  2 +-
 src/couch_views/src/couch_views_fdb.erl            |  1 -
 src/couch_views/src/couch_views_http.erl           | 32 ++++++++++++++++++----
 src/couch_views/src/couch_views_indexer.erl        |  1 -
 src/couch_views/src/couch_views_jobs.erl           |  1 -
 src/couch_views/src/couch_views_reader.erl         |  5 ++--
 src/couch_views/src/couch_views_trees.erl          |  1 -
 src/couch_views/src/couch_views_updater.erl        |  6 ++--
 src/fabric/src/fabric2_db.erl                      |  2 +-
 src/fabric/src/fabric2_util.erl                    |  2 +-
 src/mango/src/mango_cursor_special.erl             |  2 +-
 src/mango/src/mango_cursor_view.erl                |  4 +--
 src/mango/src/mango_idx.erl                        |  2 +-
 src/mango/src/mango_json_bookmark.erl              |  2 +-
 22 files changed, 68 insertions(+), 58 deletions(-)

diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index fcaee92..79ca4d1 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -12,7 +12,7 @@
 
 -module(chttpd_changes).
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -export([
     handle_db_changes/3,
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 8b99059..8f08c7f 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -15,9 +15,7 @@
 -compile(tuple_calls).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -export([handle_request/1, handle_compact_req/2, handle_design_req/2,
     db_req/2, couch_doc_open/4,handle_changes_req/2,
@@ -573,7 +571,7 @@ db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
 db_req(#httpd{method='POST',
     path_parts=[_, OP, <<"queries">>]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
     Props = chttpd:json_body_obj(Req),
-    case couch_mrview_util:get_view_queries(Props) of
+    case couch_views_util:get_view_queries(Props) of
         undefined ->
             throw({bad_request,
                 <<"POST body must include `queries` parameter.">>});
@@ -890,7 +888,7 @@ send_all_docs_keys(Db, #mrargs{} = Args, VAcc0) ->
                     doc = DocValue
                 }
         end,
-        Row1 = fabric_view:transform_row(Row0),
+        Row1 = couch_views_http:transform_row(Row0),
         view_cb(Row1, Acc)
     end,
     {ok, VAcc2} = fabric2_db:fold_docs(Db, Keys, CB, VAcc1, OpenOpts),
@@ -1131,7 +1129,7 @@ db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
         missing_rev -> nil;
         Rev -> Rev
     end,
-    {TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
+    {TargetDocId0, TargetRevs} = chttpd_util:parse_copy_destination_header(Req),
     TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
     % open old doc
     Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
@@ -1962,7 +1960,7 @@ set_namespace(<<"_local_docs">>, Args) ->
 set_namespace(<<"_design_docs">>, Args) ->
     set_namespace(<<"_design">>, Args);
 set_namespace(NS, #mrargs{} = Args) ->
-    couch_mrview_util:set_extra(Args, namespace, NS).
+    couch_views_util:set_extra(Args, namespace, NS).
 
 
 %% /db/_bulk_get stuff
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 5d9706a..3f81c1b 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -30,7 +30,7 @@
 ]).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -import(chttpd,
     [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
@@ -115,7 +115,7 @@ handle_all_dbs_req(#httpd{method='GET'}=Req) ->
         direction = Dir,
         limit = Limit,
         skip = Skip
-    } = couch_mrview_http:parse_params(Req, undefined),
+    } = couch_views_http_util:parse_params(Req, undefined),
 
     Options = [
         {start_key, StartKey},
@@ -137,7 +137,7 @@ all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
     {ok, Acc#vacc{resp=Resp1}};
 all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
-    Prepend = couch_mrview_http:prepend_val(Acc),
+    Prepend = couch_views_http_util:prepend_val(Acc),
     DbName = couch_util:get_value(id, Row),
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
     {ok, Acc#vacc{prepend=",", resp=Resp1}};
@@ -155,7 +155,7 @@ handle_dbs_info_req(#httpd{method = 'GET'} = Req) ->
 handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
     chttpd:validate_ctype(Req, "application/json"),
     Props = chttpd:json_body_obj(Req),
-    Keys = couch_mrview_util:get_view_keys(Props),
+    Keys = couch_views_util:get_view_keys(Props),
     case Keys of
         undefined -> throw({bad_request, "`keys` member must exist."});
         _ -> ok
@@ -248,7 +248,7 @@ send_db_infos(Req, ListFunctionName) ->
         direction = Dir,
         limit = Limit,
         skip = Skip
-    } = couch_mrview_http:parse_params(Req, undefined),
+    } = couch_views_http_util:parse_params(Req, undefined),
 
     Options = [
         {start_key, StartKey},
@@ -275,7 +275,7 @@ dbs_info_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
     {ok, Acc#vacc{resp = Resp1}};
 dbs_info_callback({row, Props}, #vacc{resp = Resp0} = Acc) ->
-    Prepend = couch_mrview_http:prepend_val(Acc),
+    Prepend = couch_views_http_util:prepend_val(Acc),
     Chunk = [Prepend, ?JSON_ENCODE({Props})],
     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
     {ok, Acc#vacc{prepend = ",", resp = Resp1}};
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index b17309a..9fda7ff 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -15,7 +15,7 @@
 -export([handle_doc_update_req/3]).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 
 maybe_open_doc(Db, DocId, Options) ->
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 8d40101..e0001da 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -12,7 +12,7 @@
 
 -module(chttpd_view).
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -export([
     handle_view_req/3,
@@ -35,10 +35,10 @@ multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
 
 
 stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
-    {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
-    Args1 = couch_mrview_util:set_view_type(Args0, ViewName, Views),
+    {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
+    Args1 = couch_views_util:set_view_type(Args0, ViewName, Views),
     ArgQueries = parse_queries(Req, Args1, Queries, fun(QueryArg) ->
-        couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+        couch_views_util:set_view_type(QueryArg, ViewName, Views)
     end),
     VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
     FirstChunk = "{\"results\":[",
@@ -54,9 +54,9 @@ stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
 
 
 paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
-    {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
+    {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
     ArgQueries = parse_queries(Req, Args0, Queries, fun(QueryArg) ->
-        couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+        couch_views_util:set_view_type(QueryArg, ViewName, Views)
     end),
     KeyFun = fun({Props}) ->
         {couch_util:get_value(id, Props), couch_util:get_value(key, Props)}
@@ -76,7 +76,7 @@ paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
 
 
 design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys) ->
-    Args = couch_mrview_http:parse_body_and_query(Req, Props, Keys),
+    Args = couch_views_http_util:parse_body_and_query(Req, Props, Keys),
     fabric_query_view(Db, Req, DDoc, ViewName, Args).
 
 design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
@@ -134,7 +134,7 @@ handle_view_req(#httpd{method='POST',
     path_parts=[_, _, _, _, ViewName, <<"queries">>]}=Req, Db, DDoc) ->
     chttpd:validate_ctype(Req, "application/json"),
     Props = couch_httpd:json_body_obj(Req),
-    case couch_mrview_util:get_view_queries(Props) of
+    case couch_views_util:get_view_queries(Props) of
         undefined ->
             throw({bad_request,
                 <<"POST body must include `queries` parameter.">>});
@@ -156,8 +156,8 @@ handle_view_req(#httpd{method='POST',
         path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
     chttpd:validate_ctype(Req, "application/json"),
     Props = couch_httpd:json_body_obj(Req),
-    assert_no_queries_param(couch_mrview_util:get_view_queries(Props)),
-    Keys = couch_mrview_util:get_view_keys(Props),
+    assert_no_queries_param(couch_views_util:get_view_queries(Props)),
+    Keys = couch_views_util:get_view_keys(Props),
     couch_stats:increment_counter([couchdb, httpd, view_reads]),
     design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys);
 
@@ -299,7 +299,7 @@ t_check_user_can_override_individual_query_type() ->
 
 setup_all() ->
     Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
-    meck:expect(couch_mrview_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
+    meck:expect(couch_views_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
     meck:expect(chttpd, start_delayed_json_response, 4, {ok, resp}),
     meck:expect(couch_views, query, 6, {ok, #vacc{}}),
     meck:expect(chttpd, send_delayed_chunk, 2, {ok, resp}),
@@ -314,7 +314,7 @@ setup() ->
     meck:reset([
         chttpd,
         couch_views,
-        couch_mrview_util
+        couch_views_util
     ]).
 
 
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index 1df8ee0..17e5bf2 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -19,7 +19,7 @@
 % Many options and APIs aren't yet supported here; they are added as needed.
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 -include("couch_replicator_api_wrap.hrl").
 
 -export([
diff --git a/src/couch_views/src/couch_views.erl b/src/couch_views/src/couch_views.erl
index 179e2b3..5804db0 100644
--- a/src/couch_views/src/couch_views.erl
+++ b/src/couch_views/src/couch_views.erl
@@ -26,7 +26,6 @@
 ]).
 
 -include("couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 
 
 query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
@@ -46,8 +45,8 @@ query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
     } = Mrst,
 
     Args1 = to_mrargs(Args0),
-    Args2 = couch_mrview_util:set_view_type(Args1, ViewName, Views),
-    Args3 = couch_mrview_util:validate_args(Args2),
+    Args2 = couch_views_util:set_view_type(Args1, ViewName, Views),
+    Args3 = couch_views_validate:validate_args(Args2),
     ok = check_range(Mrst, ViewName, Args3),
 
     try
@@ -199,7 +198,7 @@ check_range(Mrst, ViewName, Args) ->
         language = Lang,
         views = Views
     } = Mrst,
-    View = case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
+    View = case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
         {map, V, _} -> V;
         {red, {_, _, V}, _} -> V
     end,
diff --git a/src/couch_views/src/couch_views_batch.erl b/src/couch_views/src/couch_views_batch.erl
index ba2a227..555eac9 100644
--- a/src/couch_views/src/couch_views_batch.erl
+++ b/src/couch_views/src/couch_views_batch.erl
@@ -20,7 +20,7 @@
 ]).
 
 
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -type update_stats() :: #{
     docs_read => non_neg_integer(),
diff --git a/src/couch_views/src/couch_views_batch_impl.erl b/src/couch_views/src/couch_views_batch_impl.erl
index 9b3a4ad..d17b5b1 100644
--- a/src/couch_views/src/couch_views_batch_impl.erl
+++ b/src/couch_views/src/couch_views_batch_impl.erl
@@ -22,7 +22,7 @@
 ]).
 
 
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 
 -record(batch_st, {
diff --git a/src/couch_views/src/couch_views_fdb.erl b/src/couch_views/src/couch_views_fdb.erl
index b0fb82e..d8c9813 100644
--- a/src/couch_views/src/couch_views_fdb.erl
+++ b/src/couch_views/src/couch_views_fdb.erl
@@ -38,7 +38,6 @@
 
 
 -include("couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("fabric/include/fabric2.hrl").
 
 
diff --git a/src/couch_views/src/couch_views_http.erl b/src/couch_views/src/couch_views_http.erl
index 769d8c3..67e2a77 100644
--- a/src/couch_views/src/couch_views_http.erl
+++ b/src/couch_views/src/couch_views_http.erl
@@ -13,7 +13,7 @@
 -module(couch_views_http).
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 -export([
     parse_body_and_query/2,
@@ -24,7 +24,8 @@
     row_to_obj/2,
     view_cb/2,
     paginated/5,
-    paginated/6
+    paginated/6,
+    transform_row/1
 ]).
 
 -define(BOOKMARK_VSN, 1).
@@ -63,7 +64,7 @@ parse_params(Props, Keys, #mrargs{}=Args, Options) ->
         _ ->
             throw({bad_request, "Cannot use `bookmark` with other options"})
     end,
-    couch_mrview_http:parse_params(Props, Keys, Args, Options).
+    couch_views_http_util:parse_params(Props, Keys, Args, Options).
 
 
 row_to_obj(Row) ->
@@ -72,11 +73,11 @@ row_to_obj(Row) ->
 
 
 row_to_obj(Id, Row) ->
-    couch_mrview_http:row_to_obj(Id, Row).
+    couch_views_http_util:row_to_obj(Id, Row).
 
 
 view_cb(Msg, #vacc{paginated = false}=Acc) ->
-    couch_mrview_http:view_cb(Msg, Acc);
+    couch_views_http_util:view_cb(Msg, Acc);
 view_cb(Msg, #vacc{paginated = true}=Acc) ->
     paginated_cb(Msg, Acc).
 
@@ -279,6 +280,25 @@ mask_to_index(Mask, Pos, Acc) when is_integer(Mask), Mask > 0 ->
     mask_to_index(Mask bsr 1, Pos + 1, NewAcc).
 
 
+transform_row(#view_row{value={[{reduce_overflow_error, Msg}]}}) ->
+    {row, [{key,null}, {id,error}, {value,reduce_overflow_error}, {reason,Msg}]};
+
+transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
+    {row, [{key,Key}, {value,Value}]};
+
+transform_row(#view_row{key=Key, id=undefined}) ->
+    {row, [{key,Key}, {id,error}, {value,not_found}]};
+
+transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
+    {row, [{id,Id}, {key,Key}, {value,Value}]};
+
+transform_row(#view_row{key=Key, id=_Id, value=_Value, doc={error,Reason}}) ->
+    {row, [{id,error}, {key,Key}, {value,Reason}]};
+
+transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
+    {row, [{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}.
+
+
 -ifdef(TEST).
 
 -include_lib("eunit/include/eunit.hrl").
@@ -351,4 +371,4 @@ check_completion_test() ->
         check_completion(2, 3, [1, 2, 3, 4, 5])
     ),
     ok.
--endif.
\ No newline at end of file
+-endif.
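
For illustration, a minimal sketch of the transform_row/1 clauses added above,
assuming #view_row{} is defined in couch_views.hrl as the include changes in
this patch set suggest; the module name transform_row_demo and all field
values are hypothetical:

    -module(transform_row_demo).
    -include_lib("couch_views/include/couch_views.hrl").
    -export([demo/0]).

    demo() ->
        %% Plain map row with no doc fetched: id, key and value are echoed back.
        {row, [{id, <<"doc1">>}, {key, <<"k">>}, {value, 1}]} =
            couch_views_http:transform_row(
                #view_row{key = <<"k">>, id = <<"doc1">>, value = 1}),
        %% Reduce row (id = reduced): only key and value appear.
        {row, [{key, null}, {value, 42}]} =
            couch_views_http:transform_row(
                #view_row{key = null, id = reduced, value = 42}),
        %% include_docs lookup failed: id becomes the atom error and the
        %% error reason is returned as the value.
        {row, [{id, error}, {key, <<"k">>}, {value, missing}]} =
            couch_views_http:transform_row(
                #view_row{key = <<"k">>, id = <<"doc1">>, value = 1,
                          doc = {error, missing}}),
        ok.

Note that clause order matters here: the reduce and error shapes are matched
before the generic map-row clauses.
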
diff --git a/src/couch_views/src/couch_views_indexer.erl b/src/couch_views/src/couch_views_indexer.erl
index df53797..88b1ff6 100644
--- a/src/couch_views/src/couch_views_indexer.erl
+++ b/src/couch_views/src/couch_views_indexer.erl
@@ -30,7 +30,6 @@
 
 -include("couch_views.hrl").
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("fabric/include/fabric2.hrl").
 
 
diff --git a/src/couch_views/src/couch_views_jobs.erl b/src/couch_views/src/couch_views_jobs.erl
index 022d27c..17f0118 100644
--- a/src/couch_views/src/couch_views_jobs.erl
+++ b/src/couch_views/src/couch_views_jobs.erl
@@ -26,7 +26,6 @@
 -endif.
 
 
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include("couch_views.hrl").
 
 
diff --git a/src/couch_views/src/couch_views_reader.erl b/src/couch_views/src/couch_views_reader.erl
index 0fc910f..ae7a3c3 100644
--- a/src/couch_views/src/couch_views_reader.erl
+++ b/src/couch_views/src/couch_views_reader.erl
@@ -19,7 +19,6 @@
 
 -include("couch_views.hrl").
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("fabric/include/fabric2.hrl").
 
 
@@ -245,14 +244,14 @@ maybe_finalize(Finalizer, Red) ->
 
 
 get_map_view(Lang, Args, ViewName, Views) ->
-    case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
+    case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
         {map, View, _Args} -> View;
         {red, {_Idx, _Lang, View}, _} -> View
     end.
 
 
 get_red_view(Lang, Args, ViewName, Views) ->
-    case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
+    case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
         {red, {Idx, Lang, View}, _} -> check_red_enabled({Idx, Lang, View});
         _ -> throw({not_found, missing_named_view})
     end.
diff --git a/src/couch_views/src/couch_views_trees.erl b/src/couch_views/src/couch_views_trees.erl
index 51c1e46..9aafbb2 100644
--- a/src/couch_views/src/couch_views_trees.erl
+++ b/src/couch_views/src/couch_views_trees.erl
@@ -32,7 +32,6 @@
 
 
 -include("couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
 -include_lib("fabric/include/fabric2.hrl").
 
 
diff --git a/src/couch_views/src/couch_views_updater.erl b/src/couch_views/src/couch_views_updater.erl
index 7e5466e..defdb6b 100644
--- a/src/couch_views/src/couch_views_updater.erl
+++ b/src/couch_views/src/couch_views_updater.erl
@@ -17,7 +17,7 @@
 
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 % If the doc revision doesn't match the NewRevId passed here, we can ignore
 % the document since it is then a conflict document and it doesn't need
@@ -52,7 +52,7 @@ index_int(Db, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>,
 
     case couch_views_ddoc:is_interactive(DDoc) of
         true ->
-            {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+            {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
             case couch_views_fdb:get_creation_vs(Db, Mrst) of
                 not_found ->
                     couch_views_fdb:new_interactive_index(Db, Mrst, Seq),
@@ -87,7 +87,7 @@ write_doc(Db, #doc{deleted = Deleted} = Doc) ->
     },
 
     lists:foreach(fun(DDoc) ->
-        {ok, Mrst0} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+        {ok, Mrst0} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
         Mrst1 = couch_views_trees:open(Db, Mrst0),
 
         case should_index_doc(Doc, Mrst1) of
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index aab80a8..0074865 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -2144,7 +2144,7 @@ validate_doc_update(Db, Doc, PrevDoc) ->
 
 validate_ddoc(Db, DDoc) ->
     try
-        ok = couch_mrview:validate(Db, couch_doc:with_ejson_body(DDoc))
+        ok = couch_views_validate:validate_ddoc(Db, DDoc)
     catch
         throw:{invalid_design_doc, Reason} ->
             throw({bad_request, invalid_design_doc, Reason});
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
index 136762b..7c21252 100644
--- a/src/fabric/src/fabric2_util.erl
+++ b/src/fabric/src/fabric2_util.erl
@@ -50,7 +50,7 @@
 
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 
 
 revinfo_to_revs(RevInfo) ->
diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl
index df1f6d6..33a1f8c 100644
--- a/src/mango/src/mango_cursor_special.erl
+++ b/src/mango/src/mango_cursor_special.erl
@@ -24,7 +24,7 @@
 
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 -include("mango_cursor.hrl").
 
 
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 43a59ff..992e9cf 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -27,9 +27,7 @@
 
 
 -include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("fabric/include/fabric.hrl").
-
+-include_lib("couch_views/include/couch_views.hrl").
 -include("mango_cursor.hrl").
 -include("mango_idx_view.hrl").
 
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index 7108ae6..e27f327edf 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -60,7 +60,7 @@ list(Db) ->
 
         case proplists:get_value(<<"language">>, Props) == <<"query">> of
             true ->
-                {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+                {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
 
                 IsInteractive = couch_views_ddoc:is_interactive(DDoc),
                 BuildState = couch_views_fdb:get_build_status(Db, Mrst),
diff --git a/src/mango/src/mango_json_bookmark.erl b/src/mango/src/mango_json_bookmark.erl
index 83fd00f..b60ecdb 100644
--- a/src/mango/src/mango_json_bookmark.erl
+++ b/src/mango/src/mango_json_bookmark.erl
@@ -19,7 +19,7 @@
 ]).
 
 
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
 -include("mango_cursor.hrl").
 -include("mango.hrl").
 

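For reference, the diffs above apply one mechanical rename pattern when
porting 3.x call sites to main; a hedged summary drawn from the hunks in
this patch set (argument names are illustrative only):

    %% -include_lib("couch_mrview/include/couch_mrview.hrl")
    %%     becomes -include_lib("couch_views/include/couch_views.hrl")
    %% couch_mrview_util:ddoc_to_mrst(Db, DDoc)
    %%     becomes couch_views_util:ddoc_to_mrst(Db, DDoc)
    %% couch_mrview_util:set_view_type(Args, ViewName, Views)
    %%     becomes couch_views_util:set_view_type(Args, ViewName, Views)
    %% couch_mrview_util:extract_view(Lang, Args, ViewName, Views)
    %%     becomes couch_views_util:extract_view(Lang, Args, ViewName, Views)
    %% couch_mrview_util:validate_args(Args)
    %%     becomes couch_views_validate:validate_args(Args)
    %% couch_mrview:validate(Db, DDoc)
    %%     becomes couch_views_validate:validate_ddoc(Db, DDoc)
    %% couch_mrview_http:parse_params/4, row_to_obj/2, view_cb/2
    %%     become couch_views_http_util:parse_params/4, row_to_obj/2, view_cb/2
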
[couchdb] 24/24: Clean up Makefiles and start running all the unit tests

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 32179742424388ef3cd0a088aa93d9c12099814f
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 03:05:29 2021 -0400

    Clean up Makefiles and start running all the unit tests
    
     * Remove references to applications that no longer exist
    
     * Most importantly, start running all the unit tests. This should include 500+ new couch tests.
    
     * Noticed the `local` application was flaky and periodically timing out in CI.
       Since it's a transitive dependency of jaeger, let's skip running it for now.
       It's a bit in the same category as bcrypt, meck and hyper.
---
 Makefile     | 6 +++---
 Makefile.win | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/Makefile b/Makefile
index a748b2c..7914118 100644
--- a/Makefile
+++ b/Makefile
@@ -72,7 +72,7 @@ DESTDIR=
 
 # Rebar options
 apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper
+skip_deps=folsom,meck,mochiweb,proper,bcrypt,hyper,local
 suites=
 tests=
 
@@ -160,9 +160,9 @@ endif
 .PHONY: check
 check:  all
 	@$(MAKE) emilio
-	@$(MAKE) eunit apps=couch_eval,couch_expiring_cache,ctrace,couch_jobs,couch_views,fabric,mango,chttpd,couch_replicator
+	@$(MAKE) eunit
 	@$(MAKE) elixir-suite
-	@$(MAKE) exunit apps=chttpd
+	@$(MAKE) exunit
 	@$(MAKE) mango-test
 
 .PHONY: eunit
diff --git a/Makefile.win b/Makefile.win
index aeb7fe7..5240da3 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -76,7 +76,7 @@ DESTDIR=
 
 # Rebar options
 apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper
+skip_deps=folsom,meck,mochiweb,proper,bcrypt,hyper
 suites=
 tests=
 

[couchdb] 18/24: Remove mem3_sync:get_backlog/0 call from stats in chttpd_node

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 5b3983994bf293cd8fb50b7bcc93baa5d3d800dc
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 02:40:42 2021 -0400

    Remove mem3_sync:get_backlog/0 call from stats in chttpd_node
---
 src/chttpd/src/chttpd_node.erl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index e36380a..54e0e48 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -199,7 +199,6 @@ get_stats() ->
         {process_count, erlang:system_info(process_count)},
         {process_limit, erlang:system_info(process_limit)},
         {message_queues, {MessageQueues}},
-        {internal_replication_jobs, mem3_sync:get_backlog()},
         {distribution, {get_distribution_stats()}}
     ].
 

[couchdb] 15/24: Clean up couch_db.hrl

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 7b83445139a6dc8c4b953a1c5879d8cb8b025fea
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Apr 14 01:58:18 2021 -0400

    Clean up couch_db.hrl
    
    Unused defines and records are removed
---
 src/chttpd/src/chttpd_changes.erl |  4 +---
 src/couch/include/couch_db.hrl    | 44 ---------------------------------------
 2 files changed, 1 insertion(+), 47 deletions(-)

diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index 8bf33ec..fcaee92 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -530,9 +530,7 @@ send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
         fwd ->
             FinalAcc0 = case element(1, FinalAcc) of
                 changes_acc -> % we came here via couch_http or internal call
-                    FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)};
-                fabric_changes_acc -> % we came here via chttpd / fabric / rexi
-                    FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
+                    FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)}
             end,
             {ok, FinalAcc0};
         rev -> {ok, FinalAcc}
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
index cc1fb5d..2289089 100644
--- a/src/couch/include/couch_db.hrl
+++ b/src/couch/include/couch_db.hrl
@@ -13,18 +13,13 @@
 -define(LOCAL_DOC_PREFIX, "_local/").
 -define(DESIGN_DOC_PREFIX0, "_design").
 -define(DESIGN_DOC_PREFIX, "_design/").
--define(DEFAULT_COMPRESSION, snappy).
 
 -define(MIN_STR, <<"">>).
 -define(MAX_STR, <<255>>). % illegal utf string
 
--define(REWRITE_COUNT, couch_rewrite_count).
-
 -define(JSON_ENCODE(V), couch_util:json_encode(V)).
 -define(JSON_DECODE(V), couch_util:json_decode(V)).
 
--define(IS_OLD_RECORD(V, R), (tuple_size(V) /= tuple_size(R))).
-
 -define(b2l(V), binary_to_list(V)).
 -define(l2b(V), list_to_binary(V)).
 -define(i2b(V), couch_util:integer_to_boolean(V)).
@@ -39,7 +34,6 @@
 
 -define(SYSTEM_DATABASES, [
     <<"_dbs">>,
-    <<"_global_changes">>,
     <<"_metadata">>,
     <<"_nodes">>,
     <<"_replicator">>,
@@ -128,18 +122,6 @@
     handler
 }).
 
--record(view_fold_helper_funs, {
-    reduce_count,
-    passed_end,
-    start_response,
-    send_row
-}).
-
--record(reduce_fold_helper_funs, {
-    start_response,
-    send_row
-}).
-
 -record(extern_resp_args, {
     code = 200,
     stop = false,
@@ -149,13 +131,6 @@
     json = nil
 }).
 
--record(index_header, {
-    seq=0,
-    purge_seq=0,
-    id_btree_state=nil,
-    view_states=nil
-}).
-
 % small value used in revision trees to indicate the revision isn't stored
 -define(REV_MISSING, []).
 
@@ -176,16 +151,6 @@
     db_open_options = []
 }).
 
--record(btree, {
-    fd,
-    root,
-    extract_kv,
-    assemble_kv,
-    less,
-    reduce = nil,
-    compression = ?DEFAULT_COMPRESSION
-}).
-
 -record(proc, {
     pid,
     lang,
@@ -204,15 +169,6 @@
     atts = []
 }).
 
--record (fabric_changes_acc, {
-    db,
-    seq,
-    args,
-    options,
-    pending,
-    epochs
-}).
-
 -type doc() :: #doc{}.
 -type ddoc() :: #doc{}.
 -type user_ctx() :: #user_ctx{}.