Posted to commits@couchdb.apache.org by ro...@apache.org on 2023/05/15 19:36:23 UTC

[couchdb] branch align-makefiles updated (6705ba381 -> be9b8e96a)

This is an automated email from the ASF dual-hosted git repository.

ronny pushed a change to branch align-makefiles
in repository https://gitbox.apache.org/repos/asf/couchdb.git


 discard 6705ba381 Add a simple fabric benchmark
     new be9b8e96a Add a simple fabric benchmark

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (6705ba381)
            \
             N -- N -- N   refs/heads/align-makefiles (be9b8e96a)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 Makefile.win | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)


[couchdb] 01/01: Add a simple fabric benchmark

Posted by ro...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ronny pushed a commit to branch align-makefiles
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit be9b8e96a6b3bebfed15178f79bdafe4af9f2d9c
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Fri May 12 18:25:29 2023 -0400

    Add a simple fabric benchmark
    
    This is mostly a diagnostic tool in the spirit of couch_debug. It creates a
    database, fills it with some docs, and then tries to read them back. It
    computes rough expected rates for doc operations: how many docs per second
    it can insert, read, get via _all_docs, etc. When the benchmark is done, it
    deletes the database; if it crashes, it also deletes the database. If someone
    kills it outright, subsequent runs will still find the old databases and
    delete them.
    
    To run a benchmark:
    ```
      fabric_bench:go().
    ```
    
    Pass parameters as a map:
    ```
      fabric_bench:go(#{doc_size=>large, docs=>25000}).
    ```
    
    To get the available options and their defaults:
    ```
      fabric_bench:opts().
    ```
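    
    The options can also be combined in a single map; for example (the option
    names come from opts/0 in the diff below, while the specific values here
    are just illustrative):
    ```
      fabric_bench:go(#{q => 8, doc_size => small, docs => 50000}).
    ```
    Any keys that are left out fall back to the defaults returned by
    fabric_bench:opts().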
---
 Makefile                                    |   7 +-
 Makefile.win                                |  70 +++++-
 src/fabric/src/fabric_bench.erl             | 371 ++++++++++++++++++++++++++++
 src/fabric/test/eunit/fabric_bench_test.erl |  79 ++++++
 4 files changed, 512 insertions(+), 15 deletions(-)

diff --git a/Makefile b/Makefile
index 183afd5e4..ecd3e76c3 100644
--- a/Makefile
+++ b/Makefile
@@ -418,14 +418,12 @@ ifeq ($(with_fauxton), 1)
 endif
 
 ifeq ($(with_docs), 1)
-ifeq ($(IN_RELEASE), true)
 	@mkdir -p rel/couchdb/share/www/docs/
 	@mkdir -p rel/couchdb/share/docs/
+ifeq ($(IN_RELEASE), true)
 	@cp -R share/docs/html/* rel/couchdb/share/www/docs/
 	@cp share/docs/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
 else
-	@mkdir -p rel/couchdb/share/www/docs/
-	@mkdir -p rel/couchdb/share/docs/
 	@cp -R src/docs/build/html/ rel/couchdb/share/www/docs
 	@cp src/docs/build/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
 endif
@@ -546,13 +544,14 @@ derived:
 ################################################################################
 
 .PHONY: nouveau
-# Build nouveau
+# target: nouveau - Build nouveau
 nouveau:
 ifeq ($(with_nouveau), 1)
 	@cd nouveau && ./gradlew build -x test
 endif
 
 .PHONY: nouveau-test
+# target: nouveau-test - Run nouveau tests
 nouveau-test: nouveau-test-gradle nouveau-test-elixir
 
 .PHONY: nouveau-test-gradle
diff --git a/Makefile.win b/Makefile.win
index 418a14515..542b7a7d8 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -78,7 +78,7 @@ DESTDIR=
 
 # Rebar options
 apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper,ibrowse,local
+skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper,ibrowse
 suites=
 tests=
 
@@ -98,7 +98,7 @@ TEST_OPTS=-c startup_jitter=0 -c default_security=admin_local
 
 .PHONY: all
 # target: all - Build everything
-all: couch fauxton docs
+all: couch fauxton docs nouveau
 
 
 .PHONY: help
@@ -140,9 +140,11 @@ fauxton: share\www
 .PHONY: check
 # target: check - Test everything
 check: all
+	@$(MAKE) exunit
 	@$(MAKE) eunit
 	@$(MAKE) mango-test
 	@$(MAKE) elixir-suite
+	@$(MAKE) nouveau-test
 
 ifdef apps
 subdirs = $(apps)
@@ -167,8 +169,9 @@ exunit: export MIX_ENV=test
 exunit: export ERL_LIBS = $(shell echo %cd%)/src
 exunit: export ERL_AFLAGS = -config $(shell echo "%cd%")/rel/files/eunit.config
 exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js
-exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
-	@mix test --cover --trace $(EXUNIT_OPTS)
+exunit: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
+exunit: couch elixir-init setup-eunit
+	@mix test --trace $(EXUNIT_OPTS)
 
 setup-eunit: export BUILDDIR = $(shell echo %cd%)
 setup-eunit: export ERL_AFLAGS = -config $(shell echo "%cd%")/rel/files/eunit.config
@@ -215,26 +218,27 @@ python-black-update: .venv/bin/black
 .PHONY: elixir
 elixir: export MIX_ENV=integration
 elixir: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
-elixir: elixir-init elixir-check-formatted elixir-credo devclean
-	@dev\run $(TEST_OPTS) -a adm:pass -n 1 --enable-erlang-views \
+elixir: elixir-init devclean
+	@dev\run $(TEST_OPTS) -a adm:pass -n 1 \
+	  --enable-erlang-views \
       --locald-config test/elixir/test/config/test-config.ini \
       --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
 
 .PHONY: elixir-init
-elixir-init: MIX_ENV=test
+elixir-init: MIX_ENV=integration
 elixir-init: config.erl
 	@mix local.rebar --force && mix local.hex --force && mix deps.get
 
 .PHONY: elixir-cluster-without-quorum
 elixir-cluster-without-quorum: export MIX_ENV=integration
-elixir-cluster-without-quorum: elixir-init elixir-check-formatted elixir-credo devclean
+elixir-cluster-without-quorum: elixir-init devclean
 	@dev\run -n 3 -q -a adm:pass \
 	    --degrade-cluster 2 \
         --no-eval 'mix test --trace --only without_quorum_test $(EXUNIT_OPTS)'
 
 .PHONY: elixir-cluster-with-quorum
 elixir-cluster-with-quorum: export MIX_ENV=integration
-elixir-cluster-with-quorum: elixir-init elixir-check-formatted elixir-credo devclean
+elixir-cluster-with-quorum: elixir-init devclean
 	@dev\run -n 3 -q -a adm:pass \
 	    --degrade-cluster 1 \
 		--no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)'
@@ -269,7 +273,11 @@ elixir-source-checks: elixir-init
 .PHONY: build-report
 # target: build-report - Generate a build report
 build-report:
-	@$(PYTHON) build-aux/show-test-results.py --suites=10 --tests=10 > test-results.log
+	@$(PYTHON) build-aux/show-test-results.py --suites=10 --tests=10 > test-results.log || true
+	cat .\dev\logs\node1.log || true
+	cat .\dev\logs\nouveau.log || true
+	cat .\tmp\couch.log || true
+	cat test-results.log || true
 
 .PHONY: check-qs
 # target: check-qs - Run query server tests (ruby and rspec required!)
@@ -293,7 +301,7 @@ list-eunit-suites:
 mango-test: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
 mango-test: devclean all
 	@cd src\mango && \
-		python.exe -m venv .venv && \
+		@$(PYTHON) -m venv .venv && \
 		.venv\Scripts\pip.exe install -r requirements.txt
 	@cd src\mango && \
 		..\..\dev\run $(TEST_OPTS) \
@@ -383,6 +391,12 @@ else
 endif
 endif
 
+ifeq ($(with_nouveau), 1)
+	-@mkdir -p rel\couchdb\nouveau
+	@cp nouveau\build\libs\server-*-dist.jar rel\couchdb\nouveau\
+	@cp nouveau\nouveau.yaml rel\couchdb\nouveau\
+endif
+
 	@echo ... done
 	@echo .
 	@echo     You can now copy the rel\couchdb directory anywhere on your system.
@@ -423,6 +437,9 @@ clean:
 	-@rmdir /s/q src\mango\.venv
 	-@del /f/q src\couch\priv\couch_js\config.h
 	-@del /f/q dev\boot_node.beam dev\pbkdf2.pyc log\crash.log
+ifeq ($(with_nouveau), 1)
+	@cd nouveau && ./gradlew clean
+endif
 
 
 .PHONY: distclean
@@ -489,3 +506,34 @@ derived:
 	@echo "ON_TAG:                 $(ON_TAG)"
 	@echo "REL_TAG:                $(REL_TAG)"
 	@echo "SUB_VSN:                $(SUB_VSN)"
+
+################################################################################
+# Nouveau
+################################################################################
+
+.PHONY: nouveau
+# target: nouveau - Build nouveau
+nouveau:
+ifeq ($(with_nouveau), 1)
+	@cd nouveau && ./gradlew build -x test
+endif
+
+.PHONY: nouveau-test
+# target: nouveau-test - Run nouveau tests
+nouveau-test: nouveau-test-gradle nouveau-test-elixir
+
+.PHONY: nouveau-test-gradle
+nouveau-test-gradle: couch nouveau
+ifeq ($(with_nouveau), 1)
+	@cd nouveau && ./gradlew test
+endif
+
+.PHONY: nouveau-test-elixir
+nouveau-test-elixir: export MIX_ENV=integration
+nouveau-test-elixir: elixir-init devclean
+nouveau-test-elixir: couch nouveau
+ifeq ($(with_nouveau), 1)
+	@dev/run -n 1 -q -a adm:pass --with-nouveau \
+		--locald-config test/config/test-config.ini \
+		--no-eval 'mix test --trace --include test/elixir/test/config/nouveau.elixir'
+endif
diff --git a/src/fabric/src/fabric_bench.erl b/src/fabric/src/fabric_bench.erl
new file mode 100644
index 000000000..f00e521f9
--- /dev/null
+++ b/src/fabric/src/fabric_bench.erl
@@ -0,0 +1,371 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_bench).
+
+-export([
+    opts/0,
+    go/0,
+    go/1,
+    doc/2,
+    body/1,
+    delete_old_dbs/0
+]).
+
+-define(VERSION, "1").
+-define(PREFIX, "fabricbenchdb-").
+-define(MAX_DB_AGE_USEC, (60 * 60 * 8 * 1000000)).
+
+opts() ->
+    #{
+        q => default,
+        n => default,
+        % Doc size type: small, medium or large
+        doc_size => medium,
+        % How many total docs to insert using _bulk_docs. These are the
+        % docs used for reads and streaming benchmarks
+        docs => 100000,
+        % Batch size used for _bulk_docs insertion
+        batch_size => 1000,
+        % How many individual doc updates to do
+        individual_docs => 1000
+    }.
+
+go() ->
+    go(#{}).
+
+go(#{} = Opts) ->
+    #{q := Q, n := N} = maps:merge(opts(), Opts),
+    QN = [{q, Q}, {n, N}],
+    DbOpts = [{K, V} || {K, V} <- QN, V =/= default],
+    ok = delete_old_dbs(),
+    Db = db_name(),
+    ok = fabric:create_db(Db, DbOpts),
+    Shards = disable_compaction(Db),
+    try
+        go(Db, Opts)
+    after
+        ok = fabric:delete_db(Db),
+        clear_compaction_settings(Shards)
+    end.
+
+go(Db, #{} = Opts0) when is_binary(Db) ->
+    Opts = maps:merge(opts(), Opts0),
+    #{
+        doc_size := DocSize,
+        docs := BulkDocs,
+        batch_size := BatchSize,
+        individual_docs := IndividualDocs
+    } = Opts,
+
+    log_opts(Opts),
+    log_environment_info(Db),
+
+    io:format("~n *** Inserting ~B docs~n", [BulkDocs]),
+    {T1, {Ok, Accepted}} = run(fun() -> bulk_docs(Db, DocSize, BatchSize, BulkDocs, {0, 0}) end),
+    log("Add ~p docs, ok:~p/accepted:~p", [BulkDocs, Ok, Accepted], T1, BulkDocs),
+
+    % To avoid a lagging internal replicator skewing the results, wait
+    % for its backlog to clear before reading the docs back.
+    wait_for_internal_replicator(),
+
+    {ok, N} = fabric:get_doc_count(Db),
+    case N =/= BulkDocs of
+        true -> throw({unexpected_doc_count, N, BulkDocs});
+        false -> ok
+    end,
+
+    {T2, _} = run(fun() ->
+        rand:seed(default, 0),
+        get_docs(Db, N, N)
+    end),
+    log("Get random doc ~pX", [N], T2, N),
+
+    {T3, N} = run(fun() -> get_all_docs(Db, []) end),
+    log("All docs", [], T3, N),
+
+    {T4, N} = run(fun() -> get_all_docs(Db, [{include_docs, true}]) end),
+    log("All docs w/ include_docs", [], T4, N),
+
+    {T5, N} = run(fun() -> get_changes(Db, []) end),
+    log("Changes", [], T5, N),
+
+    {T6, _} = run(fun() -> put_docs(Db, DocSize, IndividualDocs) end),
+    log("Single doc updates ~pX", [IndividualDocs], T6, IndividualDocs),
+
+    % Total time for the timed steps above (the include_docs pass is excluded);
+    % with the doc counts this gives a rough mixed read/write ops/second rate.
+    TSec = round((T1 + T2 + T3 + T5 + T6) / 1000000),
+    io:format(" * Time to run all benchmarks            (sec): ~7B~n", [TSec]),
+    ok.
+
+old_enough_db(Db) when is_binary(Db) ->
+    case string:prefix(Db, ?PREFIX) of
+        nomatch ->
+            false;
+        Ts when is_binary(Ts) ->
+            NowUSec = os:system_time(microsecond),
+            try binary_to_integer(Ts) of
+                AgeUSec when (NowUSec - AgeUSec) > ?MAX_DB_AGE_USEC ->
+                    true;
+                _ ->
+                    false
+            catch
+                _:_ ->
+                    false
+            end
+    end.
+
+delete_old_dbs() ->
+    {ok, Dbs} = fabric:all_dbs(),
+    [ok = fabric:delete_db(Db) || Db <- Dbs, old_enough_db(Db)],
+    ok.
+
+db_name() ->
+    Suffix = integer_to_binary(os:system_time(microsecond)),
+    <<?PREFIX, Suffix/binary>>.
+
+bulk_docs(_, _, _, BulkDocs, Acc = {_, _}) when BulkDocs =< 0 ->
+    Acc;
+bulk_docs(Db, DocSize, BatchSize, BulkDocs, {Ok, Accepted}) ->
+    DocCount = min(BatchSize, BulkDocs),
+    Docs = [doc(BulkDocs - I, DocSize) || I <- lists:seq(0, DocCount - 1)],
+    Acc1 =
+        case fabric:update_docs(Db, Docs, []) of
+            {ok, [_ | _]} -> {Ok + 1, Accepted};
+            {accepted, [_ | _]} -> {Ok, Accepted + 1};
+            Error -> throw({unexpected_bulk_get_error, Error})
+        end,
+    bulk_docs(Db, DocSize, BatchSize, BulkDocs - BatchSize, Acc1).
+
+put_docs(_, _, 0) ->
+    ok;
+put_docs(Db, DocSize, DocId) ->
+    {ok, _} = fabric:update_doc(Db, doc(random, DocSize), []),
+    put_docs(Db, DocSize, DocId - 1).
+
+get_docs(_, _, 0) ->
+    ok;
+get_docs(Db, DocCount, N) ->
+    ok = get_doc(Db, rand:uniform(DocCount)),
+    get_docs(Db, DocCount, N - 1).
+
+get_doc(Db, DocId) ->
+    DocIdBin = integer_to_binary(DocId),
+    case fabric:open_doc(Db, DocIdBin, []) of
+        {ok, Doc} ->
+            _ = couch_doc:to_json_obj(Doc, []),
+            ok;
+        {not_found, missing} ->
+            not_found
+    end.
+
+get_db_info(Db) ->
+    {ok, Info} = fabric:get_db_info(Db),
+    {CLInfo} = proplists:get_value(cluster, Info),
+    Q = proplists:get_value(q, CLInfo),
+    N = proplists:get_value(n, CLInfo),
+    [{q, Q}, {n, N}].
+
+get_all_docs(Db, Opts) ->
+    {ok, Acc} = fabric:all_docs(Db, [], fun all_docs_cb/2, 0, Opts),
+    Acc.
+
+all_docs_cb({row, Row}, Acc) ->
+    _ = jiffy:encode({Row}),
+    {ok, Acc + 1};
+all_docs_cb({meta, _}, Acc) ->
+    {ok, Acc};
+all_docs_cb(complete, Acc) ->
+    {ok, Acc}.
+
+get_changes(Db, Opts) ->
+    {ok, Acc} = fabric:changes(Db, fun changes_cb/2, 0, Opts),
+    Acc.
+
+changes_cb(start, Acc) ->
+    {ok, Acc};
+changes_cb({change, Row}, Acc) ->
+    _ = jiffy:encode(Row),
+    {ok, Acc + 1};
+changes_cb(timeout, Acc) ->
+    {ok, Acc};
+changes_cb({stop, _, _}, Acc) ->
+    {ok, Acc}.
+
+run(Fun) ->
+    T0 = ts(),
+    Res = fabric_util:isolate(Fun),
+    {ts() - T0, Res}.
+
+log_environment_info(Db) ->
+    [{q, Q}, {n, N}] = get_db_info(Db),
+    Nodes = length(all_nodes()),
+    {OsType, OsDetail} = os:type(),
+    CouchVersion = couch_server:get_version(),
+    GitSha = couch_server:get_git_sha(),
+    VmInfo = erlang:system_info(system_version),
+    io:format(
+        "~n *** Environment~n"
+        " * Nodes        : ~p~n"
+        " * Bench ver.   : ~s~n"
+        " * N            : ~p~n"
+        " * Q            : ~p~n"
+        " * OS           : ~p/~p~n"
+        " * Couch ver.   : ~s~n"
+        " * Couch git sha: ~s~n"
+        " * VM details   : ~s",
+        [Nodes, ?VERSION, N, Q, OsType, OsDetail, CouchVersion, GitSha, VmInfo]
+    ).
+
+log_opts(Opts) ->
+    io:format(" *** Parameters~n", []),
+    KVs = lists:sort(maps:to_list(Opts)),
+    [io:format(" * ~-16s : ~p~n", [K, V]) || {K, V} <- KVs],
+    ok.
+
+log(FStr, FArgs, T, N) ->
+    Str = lists:flatten(io_lib:format(FStr, FArgs)),
+    io:format(" * ~-38s (Hz): ~7B~n", [Str, hz(T, N)]).
+
+ts() ->
+    erlang:monotonic_time(microsecond).
+
+hz(Dt, Count) ->
+    % These are very rough numbers, so emphasize that by
+    % rounding off a few trailing digits based on the
+    % magnitude of the computed rate.
+    Hz = round(1000000 * Count / Dt),
+    Round =
+        if
+            Hz > 100000 -> 10000;
+            Hz > 10000 -> 1000;
+            Hz > 1000 -> 100;
+            Hz > 100 -> 10;
+            true -> 1
+        end,
+    round(Hz / Round) * Round.
+
+wait_for_internal_replicator() ->
+    case nodes() of
+        [] ->
+            ok;
+        [_ | _] ->
+            timer:sleep(4000),
+            Backlog = mem3_backlog(),
+            case Backlog > 0 of
+                true ->
+                    io:format("    ---  mem3_sync backlog: ~p~n", [Backlog]),
+                    wait_for_internal_replicator();
+                false ->
+                    timer:sleep(4000)
+            end
+    end.
+
+% This setting is not persisted, so if the node crashes it will be reset
+disable_compaction(Db) ->
+    Shards = mem3:shards(Db),
+    lists:foreach(
+        fun(S) ->
+            Name = binary_to_list(mem3:name(S)),
+            Node = mem3:node(S),
+            Args = ["smoosh.ignore", Name, "true", _Persist = false],
+            ok = erpc:call(Node, config, set, Args, 15000)
+        end,
+        Shards
+    ),
+    Shards.
+
+clear_compaction_settings([_ | _] = Shards) ->
+    lists:foreach(
+        fun(S) ->
+            Name = binary_to_list(mem3:name(S)),
+            Node = mem3:node(S),
+            Args = ["smoosh.ignore", Name, _Persist = false],
+            ok = erpc:call(Node, config, delete, Args, 15000)
+        end,
+        Shards
+    ),
+    ok.
+
+all_nodes() ->
+    [node() | nodes()].
+
+mem3_backlog() ->
+    Resps = erpc:multicall(all_nodes(), mem3_sync, get_backlog, []),
+    lists:foldl(fun({ok, Bl}, Acc) -> max(Bl, Acc) end, 0, Resps).
+
+doc(random, DocSize) ->
+    doc(rand:uniform(1 bsl 128), DocSize);
+doc(Id, DocSize) when is_integer(Id), is_atom(DocSize) ->
+    {[{<<"_id">>, integer_to_binary(Id)}] ++ body(DocSize)}.
+
+hexbin(Size) ->
+    binary:encode_hex(crypto:strong_rand_bytes(round(Size / 2))).
+
+body(small) ->
+    [
+        {<<"random_val">>, rand:uniform(1 bsl 50)},
+        {<<"worker_id">>, 72},
+        {<<"foo">>, <<"1209809812904880912">>},
+        {<<"bar">>, <<"asdfasdf">>},
+        {<<"baz">>, <<"eeefffwww">>},
+        {<<"blah">>, <<"lkqjwelkrjlqwejkrklqwjeklrjkl lkjasdflk jaslkdfj ">>},
+        {<<"num1">>, <<"123555123">>},
+        {<<"num2">>, <<"90812091289054">>},
+        {<<"biz">>, <<",zmxncv lkjaf qwerlkj">>},
+        {<<"zab">>, <<"zooooooob">>},
+        {<<"mtime">>, <<"1453234712345">>},
+        {<<"ctime">>, <<"1453234712345">>},
+        {<<"bool">>, false}
+    ];
+body(medium) ->
+    % From a random json generator with a few tweaks
+    [
+        {<<"random_val">>, rand:uniform(1 bsl 50)},
+        {<<"index">>, 0},
+        {<<"blob1">>, hexbin(1024)},
+        {<<"guid">>, <<"f9b39716-285a-4e9c-8574-790f42b9631e">>},
+        {<<"isActive">>, true},
+        {<<"name">>, <<"Abc Def Xyz">>},
+        {<<"company">>, <<"FOOCORPINCLLC">>},
+        {<<"about">>,
+            <<"Ad aute anim eiusmod consequat ullamco excepteur cupidatat. Sunt consectetur tempor culpa incididunt voluptate enim dolore ex ullamco occaecat irure consectetur anim. Incididunt sint do non exercitation culpa cupidatat.\r\n">>},
+        {<<"extra">>,
+            <<"Sit proident labore aliquip do duis irure eu esse quis dolore non qui anim minim. Commodo et pariatur Lorem commodo ea consequat. Excepteur tempor commodo voluptate sunt anim id est occaecat nostrud culpa magna dolor aliqua incididunt. Qui nisi occaecat qui velit minim do occaecat.\r\nExercitation amet ut ut et et elit consequat ex ea eiusmod incididunt. Incididunt laborum magna sit qui ex qui ullamco fugiat reprehenderit qui. Consequat nulla sit duis minim esse velit sint [...]
+        {<<"registered">>, <<"2016-06-05T12:41:48 +04:00">>},
+        {<<"latitude">>, 16.091562},
+        {<<"longitude">>, -83.309904},
+        {<<"tags">>, [
+            <<"esse irure eiusmod ad reprehenderit commodo Lorem fugiat nulla esse velit pariatur dolore et exercitation">>,
+            <<"consequat elit laboris labore laboris Lorem non enim non cillum eiusmod quis nisi culpa proident">>,
+            <<"esse cillum ex in incididunt adipisicing qui esse eiusmod consectetur tempor labore consequat excepteur incididunt">>,
+            <<"occaecat occaecat non reprehenderit reprehenderit nulla mollit et minim velit fugiat elit occaecat cillum ad">>,
+            <<"fugiat aliqua esse non pariatur reprehenderit ea culpa non ex culpa et exercitation commodo aute">>,
+            <<"elit magna voluptate fugiat voluptate aliquip officia dolore non laboris velit amet excepteur tempor veniam">>,
+            <<"officia mollit nulla cupidatat occaecat Lorem mollit magna consectetur dolor qui eiusmod commodo eiusmod sit">>,
+            <<"aliquip magna dolore commodo qui amet ipsum cupidatat cillum eu veniam voluptate ipsum sint reprehenderit">>,
+            <<"ea elit id labore mollit magna non commodo magna culpa amet id amet duis do">>,
+            <<"proident deserunt id fugiat sunt ipsum sit aute aute eu ex consectetur proident consequat ea">>,
+            <<"est do consequat aute reprehenderit ea et do magna adipisicing tempor laboris duis aliquip aute">>,
+            <<"cupidatat ad occaecat et do Lorem sint duis dolore irure magna quis excepteur ex tempor">>,
+            <<"et nisi pariatur deserunt quis Lorem laborum dolore magna qui ex quis ea ea anim">>,
+            <<"do laboris magna aliqua est laborum reprehenderit ut eiusmod do qui irure Lorem dolore dolor">>,
+            <<"quis in commodo ex pariatur adipisicing eu ad nulla exercitation irure elit nisi excepteur id">>
+        ]},
+        {<<"array_ints">>, [I || I <- lists:seq(1, 1000)]},
+        {<<"array_floats">>, [float(I) || I <- lists:seq(1, 1000)]},
+        {<<"blob2">>, hexbin(1024)}
+    ];
+body(large) ->
+    % These are mostly incompressible, should be about ~128KB or so
+    body(medium) ++ [{integer_to_binary(Field), hexbin(4096)} || Field <- lists:seq(1, 32)].
diff --git a/src/fabric/test/eunit/fabric_bench_test.erl b/src/fabric/test/eunit/fabric_bench_test.erl
new file mode 100644
index 000000000..95d0dc41c
--- /dev/null
+++ b/src/fabric/test/eunit/fabric_bench_test.erl
@@ -0,0 +1,79 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_bench_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+fabric_bench_test_() ->
+    {
+        setup,
+        fun setup/0,
+        fun teardown/1,
+        with([
+            ?TDEF(t_default_doc_size, 15),
+            ?TDEF(t_small_doc_size, 15),
+            ?TDEF(t_large_doc_size, 15),
+            ?TDEF(t_old_db_deletion_works),
+            ?TDEF(t_newer_db_deletion_doesnt_work),
+            ?TDEF(t_db_deletion_ignores_other_dbs)
+        ])
+    }.
+
+setup() ->
+    test_util:start_couch([fabric]).
+
+teardown(Ctx) ->
+    test_util:stop_couch(Ctx).
+
+t_default_doc_size(_Ctx) ->
+    Opts = #{docs => 100, individual_docs => 5},
+    % The goal is to just have it not crash
+    ?assertEqual(ok, fabric_bench:go(Opts)).
+
+t_small_doc_size(_Ctx) ->
+    Opts = #{q => 4, docs => 100, doc_size => small, individual_docs => 5},
+    % The goal is to just have it not crash
+    ?assertEqual(ok, fabric_bench:go(Opts)).
+
+t_large_doc_size(_Ctx) ->
+    Opts = #{q => 1, docs => 5, doc_size => large, individual_docs => 1},
+    % The goal is to just have it not crash
+    ?assertEqual(ok, fabric_bench:go(Opts)).
+
+t_old_db_deletion_works(_Ctx) ->
+    NineHoursAgoUsec = os:system_time(microsecond) - (9 * 60 * 60 * 1000000),
+    Suffix = integer_to_binary(NineHoursAgoUsec),
+    Db = <<"fabricbenchdb-", Suffix/binary>>,
+    ok = fabric:create_db(Db, [{q, 1}, {n, 1}]),
+    fabric_bench:delete_old_dbs(),
+    ?assertError(database_does_not_exist, fabric:get_doc_count(Db)).
+
+t_newer_db_deletion_doesnt_work(_Ctx) ->
+    SevenHoursAgoUsec = os:system_time(microsecond) - (7 * 60 * 60 * 1000000),
+    Suffix = integer_to_binary(SevenHoursAgoUsec),
+    Db = <<"fabricbenchdb-", Suffix/binary>>,
+    ok = fabric:create_db(Db, [{q, 1}, {n, 1}]),
+    fabric_bench:delete_old_dbs(),
+    ?assertEqual({ok, 0}, fabric:get_doc_count(Db)).
+
+t_db_deletion_ignores_other_dbs(_Ctx) ->
+    Db1 = <<"fabricbenchdb-">>,
+    Db2 = <<"fabricbenchdb">>,
+    Db3 = <<"fabricbenchdb-xyz">>,
+    ok = fabric:create_db(Db1, [{q, 1}, {n, 1}]),
+    ok = fabric:create_db(Db2, [{q, 1}, {n, 1}]),
+    ok = fabric:create_db(Db3, [{q, 1}, {n, 1}]),
+    fabric_bench:delete_old_dbs(),
+    ?assertEqual({ok, 0}, fabric:get_doc_count(Db1)),
+    ?assertEqual({ok, 0}, fabric:get_doc_count(Db2)),
+    ?assertEqual({ok, 0}, fabric:get_doc_count(Db3)).