[couchdb] Kill fragile etap tests in favor of eunit-based test-suite

Peter Lemenkov peter at fedoraproject.org
Fri Aug 29 15:03:09 UTC 2014


commit ed87d97ca6ca36b321c283833fa252995d8cc361
Author: Peter Lemenkov <lemenkov at gmail.com>
Date:   Fri Aug 29 19:03:08 2014 +0400

    Kill fragile etap tests in favor of eunit-based test-suite
    
    Signed-off-by: Peter Lemenkov <lemenkov at gmail.com>

 .gitignore                                         |    1 +
 ...ilence-redundant-logging-to-stdout-stderr.patch |   10 +-
 couchdb-0012-Expand-.d-directories-in-erlang.patch |   10 +-
 ...hdb-0013-Add-systemd-notification-support.patch |    9 +-
 ...014-Add-run-script-to-execute-eunit-tests.patch |15346 ++++++++++++++++++++
 couchdb.spec                                       |   34 +-
 sources                                            |    1 +
 7 files changed, 15369 insertions(+), 42 deletions(-)
---
diff --git a/.gitignore b/.gitignore
index 1065a06..e16572a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,4 @@ apache-couchdb-1.0.1.tar.gz
 /apache-couchdb-1.3.1.tar.gz
 /apache-couchdb-1.5.0.tar.gz
 /apache-couchdb-1.6.0.tar.gz
+/couchdb-tests-blobs.tar
diff --git a/couchdb-0011-Silence-redundant-logging-to-stdout-stderr.patch b/couchdb-0011-Silence-redundant-logging-to-stdout-stderr.patch
index b0c28f6..ab6c802 100644
--- a/couchdb-0011-Silence-redundant-logging-to-stdout-stderr.patch
+++ b/couchdb-0011-Silence-redundant-logging-to-stdout-stderr.patch
@@ -1,7 +1,6 @@
-From 50f4c9cc273790d4e4ed6bd93f1272e7d399eba0 Mon Sep 17 00:00:00 2001
 From: Warren Togami <wtogami at gmail.com>
 Date: Wed, 2 Jul 2014 22:54:38 -1000
-Subject: [PATCH 11/12] Silence redundant logging to stdout/stderr
+Subject: [PATCH] Silence redundant logging to stdout/stderr
 
 Instead print log filename to stdout during startup.
 
@@ -10,10 +9,6 @@ The stdout/stderr redundantly floods /var/log/messages.
 This temporary hack was suggested by rnewson in #couchdb.
 https://issues.apache.org/jira/browse/COUCHDB-2264
 Related issue
----
- src/couchdb/couch_log.erl        | 2 +-
- src/couchdb/couch_server_sup.erl | 6 ++++--
- 2 files changed, 5 insertions(+), 3 deletions(-)
 
 diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl
 index cd4bbbb..db483a7 100644
@@ -47,6 +42,3 @@ index be3c3a3..39a5568 100644
      ]),
      case LogLevel of
      "debug" ->
--- 
-1.9.3
-
diff --git a/couchdb-0012-Expand-.d-directories-in-erlang.patch b/couchdb-0012-Expand-.d-directories-in-erlang.patch
index 16c365a..0b8b4d3 100644
--- a/couchdb-0012-Expand-.d-directories-in-erlang.patch
+++ b/couchdb-0012-Expand-.d-directories-in-erlang.patch
@@ -1,12 +1,7 @@
-From c530313acb1c8ed59962eae676f919e1ea53bf96 Mon Sep 17 00:00:00 2001
 From: Robert Newson <rnewson at apache.org>
 Date: Sun, 6 Jul 2014 23:47:23 +0100
-Subject: [PATCH 12/12] Expand .d directories in erlang
+Subject: [PATCH] Expand .d directories in erlang
 
----
- bin/couchdb.tpl.in        | 26 +++++++++-----------------
- src/couchdb/couch_app.erl | 12 +++++++++++-
- 2 files changed, 20 insertions(+), 18 deletions(-)
 
 diff --git a/bin/couchdb.tpl.in b/bin/couchdb.tpl.in
 index ba034cc..ffdbb17 100644
@@ -107,6 +102,3 @@ index 42411a8..d6d8c0c 100644
      end.
  
  start_apps([]) ->
--- 
-1.9.3
-
diff --git a/couchdb-0013-Add-systemd-notification-support.patch b/couchdb-0013-Add-systemd-notification-support.patch
index f650849..26802e6 100644
--- a/couchdb-0013-Add-systemd-notification-support.patch
+++ b/couchdb-0013-Add-systemd-notification-support.patch
@@ -1,7 +1,6 @@
-From 2969091a2ba903b9329739db886326528bfbecbe Mon Sep 17 00:00:00 2001
 From: Peter Lemenkov <lemenkov at gmail.com>
 Date: Mon, 7 Jul 2014 21:32:43 +0400
-Subject: [PATCH 13/13] Add systemd notification support
+Subject: [PATCH] Add systemd notification support
 
 Signed-off-by: Peter Lemenkov <lemenkov at gmail.com>
 
@@ -13,9 +12,6 @@ by calling any function from the module (and catch for possible
 exceptions if no such module available) or explicitly.
 
 Signed-off-by: Peter Lemenkov <lemenkov at gmail.com>
----
- src/couchdb/couch_server_sup.erl | 1 +
- 1 file changed, 1 insertion(+)
 
 diff --git a/src/couchdb/couch_server_sup.erl b/src/couchdb/couch_server_sup.erl
 index 39a5568..2d25220 100644
@@ -29,6 +25,3 @@ index 39a5568..2d25220 100644
      Uris = [get_uri(Name, Ip) || Name <- [couch_httpd, https]],
      [begin
          case Uri of
--- 
-1.9.3
-
diff --git a/couchdb-0014-Add-run-script-to-execute-eunit-tests.patch b/couchdb-0014-Add-run-script-to-execute-eunit-tests.patch
new file mode 100644
index 0000000..8469be9
--- /dev/null
+++ b/couchdb-0014-Add-run-script-to-execute-eunit-tests.patch
@@ -0,0 +1,15346 @@
+From: Alexander Shorin <kxepal at apache.org>
+Date: Fri, 16 May 2014 00:08:36 +0400
+Subject: [PATCH] Add run script to execute eunit tests
+
+Usage is the same as for test/etap/run:
+
+    ./test/couchdb/run -v ${PATH}
+
+-v runs in verbose mode, as etap does. Also, you can use make for that:
+
+    make check-eunit
+
+which will run tests everywhere in project where Makefile contains
+check-eunit subcommand definition.
+
+The ${PATH} thing could be single file or directory. The latter should
+contains *_tests.erl files which would be compiled and executed by
+eunit.
+
+The *_tests.erl
+
+The reason of compiling on run instead of using autoconf for that is
+to simplify tests developing and avoid situations, when you'd fixed
+the test or add new one, but forgot to remove/compile beam file.
+
+All test_*.beam files are stored in the test/couchdb/ebin directory,
+and the temporary test files will be placed in the test/couchdb/temp one.
+Both directories will be removed by make clean/distclean command.
+
+Add common header for eunit test files
+
+Port 001-load.t etap test suite to eunit
+
+Port 002-icu-driver.t etap test suite to eunit
+
+See setup/0 comment for specific info about loading
+couch_icu_driver with eunit.
+
+Port 010-file-basics.t and 011-file-headers.t etap test suites to eunit
+
+Both merged into a single suite since they test a single target and share
+common bits.
+
+Port 020-btree-basics.t and 021-btree-reductions.t etap suites to eunit
+
+Both merged into a single suite since they test a single target and share
+common bits.
+
+Port 030-doc-from-json.t and 031-doc-to-json.t etap suites to eunit
+
+Both merged into a single suite since they test a single target and share
+common bits.
+
+Port 040-util.t etap test suite to eunit
+
+Port 041-uuid.t etap test suite to eunit
+
+Config files are removed in favor of using couch_config API instead.
+
+Port 042-work-queue.t etap test suite to eunit
+
+Etap tests were made in flow style, testing the same things multiple
+times without real need. For eunit they are split into small test cases
+to focus on testing goals.
+
+Timeout on receive is decreased from 3000 to 100.
+
+Port 043-find-in-binary.t etap test suite to eunit
+
+It has been merged into the couch_util_tests suite.
+
+Port 050-stream.t etap test suite to eunit
+
+Port 06[0-5]-kt-*.t etap test suites to eunit
+
+All merged into a single suite since they test the same module.
+
+Port 070-couch-db.t etap test suite to eunit
+
+Fix ERL_LIB environment variable setting. Add ?tempdb macros for
+unique temporary database name generation.
+
+Port 072-cleanup.t etap test suite to eunit
+
+request functions from test_util were moved to the test_request module
+with nicer and simpler API.
+
+Port 073-changes.t etap test suite to eunit
+
+For the heartbeats test they aren't counted anymore since their
+amount heavily depends on the overall system performance, and some
+assertions may fail or not because of that. Instead of this, we just
+ensure that their amount is going to increase over the time.
+
+Port 074-doc-update-conflicts.t etap test suite to eunit
+
+Timeout decreased, added 10K clients case
+
+Port 075-auth-cache.t etap test suite to eunit
+
+Timeouts are removed.
+
+Port 076-file-compression.t etap test suite to eunit
+
+The original test suite was decoupled into compaction and comparison
+cases.
+
+Port 077-couch-db-fast-db-delete-create.t etap test suite to eunit
+
+Merged into couch_db_tests suite.
+
+Port 080-config-get-set.t etap test suite to eunit
+
+Port 081-config-override.t etap test suite to eunit
+
+Merged into couch_config_tests suite.
+Setup fixtures.
+
+Port 082-config-register.t etap test suite to eunit
+
+Merged into couch_config_tests suite.
+
+Port 083-config-no-files.t etap test suite to eunit
+
+Merged into couch_config_tests suite.
+
+Port 090-task-status.t etap test suite to eunit
+
+Split huge test case into multiple ones. Fix issue with get_task_prop
+when Acc may be reset if searched task isn't last in the list.
+
+Port 100-ref-counter.t etap test suite to eunit
+
+Port 120-stats-collect.t etap test suite to eunit
+
+Port 121-stats-aggregates.t etap test suite to eunit
+
+Merged into couch_stats_tests suite.
+
+Port 130-attachments-md5.t etap test suite to eunit
+
+Add random document id generator macros.
+Have to use handmade http client instead of ibrowse since it makes too
+complicated sending chunked requests.
+
+Port 140-attachments-comp.t etap test suite to eunit
+
+- Merge into couchdb_attachments_tests suite;
+- Add PUT requests to test_request util;
+- Remove dependency from files outside fixtures directory;
+- Group test cases to reduce amount of duplicate code;
+- Fix hidden issue with gzip encoding: for encoding_length stub info
+  check using zlib:gzip on 2KiB+ files leads to mismatch by 2-4 bytes
+  and this difference grows with file size. Using gzip fun code from
+  couch_stream solves the issue.
+
+Port 150-invalid-view-seq.t etap test suite to eunit
+
+Merged into couchdb_views_tests suite.
+
+Port 160-vhosts.t etap test suite to eunit
+
+Split Rewrite and OAuth tests.
+
+Port 170-os-daemons.t etap test suite to eunit
+
+Port 171-os-daemons-config.t etap test suite to eunit
+
+Merged into couchdb_os_daemons_tests suite.
+
+Port 172-os-daemons-errors.t etap test suite to eunit
+
+Merged into couchdb_os_daemons_tests suite.
+Removed errors redirection to /dev/null to explicitly signal that
+permissions are set correctly.
+
+Port 173-os-daemons-cfg-register.t etap test suite to eunit
+
+Merged into couchdb_os_daemons_tests suite.
+
+Port 180-http-proxy.t etap test suite to eunit
+
+Port 190-json-stream-parse.t etap test suite to eunit
+
+Port 200-view-group-no-db-leaks.t etap test suite to eunit
+
+Merged into couchdb_views_tests suite. Apply minor refactor changes.
+
+Port 201-view-group-shutdown.t etap test suite to eunit
+
+Merged into couchdb_views_tests suite. Database population reduced
+to speedup test and removed second view index call which leaded to
+race condition when compaction becomes completed in time of view index
+update call and before assertion check for {error, all_dbs_active}.
+
+Port 210-os-proc-pool.t etap test suite to eunit
+
+Port 220-compaction-daemon.t etap test suite to eunit
+
+Port 230-pbkfd2.t etap test suite to eunit
+
+Port 231-cors.t etap test suite to eunit
+
+Extend vhost and subresource testing for more generic code.
+
+Port 232-csp.t etap test suite to eunit
+
+Port 250-upgrade-legacy-view-files.t etap test suite to eunit
+
+Run couchdb tests with eunit
+
+Port couch_mrview/01-load.t etap test suite to eunit
+
+Port couch_mrview/02-map-views.t etap test suite to eunit
+
+Port couch_mrview/03-red-views.t etap test suite to eunit
+
+Port couch_mrview/04-index-info.t etap test suite to eunit
+
+Port couch_mrview/05-collation.t etap test suite to eunit
+
+Port couch_mrview/06-all-docs.t etap test suite to eunit
+
+Port couch_mrview/07-compact-swap.t etap test suite to eunit
+
+Run couch_mrview tests with eunit
+
+Port couch_replicator/01-load.t etap test suite to eunit
+
+Port couch_replicator/02-httpc-pool.t etap test suite to eunit
+
+Test test_worker_dead_pool_full removed as it's redundant.
+
+Port couch_replicator/03-replication-compact.t etap test suite to eunit
+
+Split big test fun into smaller steps, optimize timeouts.
+
+Port couch_replicator/04-replication-large_atts.t etap test to eunit
+
+Port couch_replicator/05-replication-many-leaves.t etap test to eunit
+
+Port couch_replicator/06-doc-missing-stubs.t etap test suite to eunit
+
+Port couch_replicator/07-use-checkpoints.t etap test to eunit
+
+Run couch_replicator tests with eunit
+
+Goodbye etap!
+
+Add uuid for test server
+
+Prevents its generation during test runs and failure for
+make distcheck when config file is read only.
+
+Fix tests temp directory path
+
+Fix relative include path for couch_eunit.hrl
+
+Move couch_eunit.hrl.in to include directory
+
+Because include path "." has higher priority than any other custom
+paths, generated during `configure` phase couch_eunit.hrl contains
+build and source paths which aren't used for `make distcheck` causing
+various failures.
+
+Ensures that the .test_design directory gets created
+
+Handle test_cfg_register daemon path in special way
+
+During make distcheck test_cfg_register binary become separated from
+others fixtures and becomes available by ?BUILDDIR path, not ?SOURCEDIR
+as others are.
+
+Simplify os_daemon_configer.escript
+
+There is no need to start couch_config, init code paths and include
+anything, just need to make ejson beams available.
+
+More etap cleanup
+
+Fix Makefile rules for `make distcheck`
+
+Conflicts:
+	configure.ac
+	src/Makefile.am
+	test/etap/Makefile.am
+	test/etap/test_util.erl.in
+
+diff --git a/LICENSE b/LICENSE
+index 4c58f19..0fbc123 100644
+--- a/LICENSE
++++ b/LICENSE
+@@ -474,31 +474,6 @@ For the src/erlang-oauth component:
+   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+   OTHER DEALINGS IN THE SOFTWARE.
+ 
+-For the src/etap component:
+-
+-  Copyright (c) 2008-2009 Nick Gerakines <nick at gerakines.net>
+-
+-  Permission is hereby granted, free of charge, to any person
+-  obtaining a copy of this software and associated documentation
+-  files (the "Software"), to deal in the Software without
+-  restriction, including without limitation the rights to use,
+-  copy, modify, merge, publish, distribute, sublicense, and/or sell
+-  copies of the Software, and to permit persons to whom the
+-  Software is furnished to do so, subject to the following
+-  conditions:
+-  
+-  The above copyright notice and this permission notice shall be
+-  included in all copies or substantial portions of the Software.
+-  
+-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+-  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+-  OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+-  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+-  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+-  WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+-  OTHER DEALINGS IN THE SOFTWARE.
+-
+ For the src/ejson/yajl component
+ 
+ Copyright 2010, Lloyd Hilaiel.
+diff --git a/Makefile.am b/Makefile.am
+index 1eca5ab..d6b2afe 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -47,7 +47,7 @@ EXTRA_DIST = \
+ 
+ check: dev check-js
+ if TESTS
+-	$(top_builddir)/test/etap/run $(top_srcdir)/test/etap
++	$(top_builddir)/test/couchdb/run -v $(top_srcdir)/test/couchdb
+ endif
+ 
+ check-js: dev
+@@ -57,20 +57,9 @@ if USE_CURL
+ endif
+ endif
+ 
+-check-etap: dev
++check-eunit: dev
+ if TESTS
+-	$(top_builddir)/test/etap/run $(top_srcdir)/test/etap
+-endif
+-
+-cover: dev
+-if TESTS
+-	rm -f cover/*.coverdata
+-	COVER=1 COVER_BIN=./src/couchdb/ $(top_builddir)/test/etap/run
+-	SRC=./src/couchdb/ \
+-	    $(ERL) -noshell \
+-	    -pa src/etap \
+-	    -eval 'etap_report:create()' \
+-	    -s init stop > /dev/null 2>&1
++	$(top_builddir)/test/couchdb/run -v $(top_srcdir)/test/couchdb
+ endif
+ 
+ dev: all
+@@ -107,7 +96,6 @@ local-clean: maintainer-clean
+ 	rm -f $(top_srcdir)/aclocal.m4
+ 	rm -f $(top_srcdir)/config.h.in
+ 	rm -f $(top_srcdir)/configure
+-	rm -f $(top_srcdir)/test/etap/temp.*
+ 	rm -f $(top_srcdir)/*.tar.gz
+ 	rm -f $(top_srcdir)/*.tar.gz.*
+ 	find $(top_srcdir) -name Makefile.in -exec rm -f {} \;
+diff --git a/NOTICE b/NOTICE
+index 08e3b82..be5ed49 100644
+--- a/NOTICE
++++ b/NOTICE
+@@ -42,10 +42,6 @@ This product also includes the following third-party components:
+ 
+    Copyright 2012, the authors and contributors
+ 
+- * ETap (http://github.com/ngerakines/etap/)
+-
+-   Copyright 2009, Nick Gerakines <nick at gerakines.net>
+-
+  * mimeparse.js (http://code.google.com/p/mimeparse/)
+ 
+    Copyright 2009, Chris Anderson <jchris at apache.org>
+diff --git a/configure.ac b/configure.ac
+index 7a84f4f..8d1a64f 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -749,12 +749,14 @@ AC_CONFIG_FILES([src/couchjs-node/Makefile])
+ AC_CONFIG_FILES([src/couchdb/couch.app.tpl])
+ AC_CONFIG_FILES([src/couchdb/Makefile])
+ AC_CONFIG_FILES([src/couchdb/priv/Makefile])
+-AC_CONFIG_FILES([src/etap/Makefile])
+ AC_CONFIG_FILES([src/ejson/Makefile])
+ AC_CONFIG_FILES([test/Makefile])
+ AC_CONFIG_FILES([test/bench/Makefile])
+-AC_CONFIG_FILES([test/etap/Makefile])
+-AC_CONFIG_FILES([test/etap/test_util.erl])
++AC_CONFIG_FILES([test/couchdb/run])
++AC_CONFIG_FILES([test/couchdb/Makefile])
++AC_CONFIG_FILES([test/couchdb/include/couch_eunit.hrl])
++AC_CONFIG_FILES([test/couchdb/fixtures/Makefile])
++AC_CONFIG_FILES([test/couchdb/fixtures/os_daemon_configer.escript])
+ AC_CONFIG_FILES([test/javascript/Makefile])
+ AC_CONFIG_FILES([test/view_server/Makefile])
+ AC_CONFIG_FILES([utils/Makefile])
+diff --git a/license.skip b/license.skip
+index 45558d1..12eaa5e 100644
+--- a/license.skip
++++ b/license.skip
+@@ -166,14 +166,16 @@
+ ^test/Makefile.in
+ ^test/bench/Makefile
+ ^test/bench/Makefile.in
+-^test/etap/.*.beam
+-^test/etap/.*.o
+-^test/etap/.deps/.*
+-^test/etap/test_cfg_register
+-^test/etap/Makefile
+-^test/etap/Makefile.in
+-^test/etap/temp..*
+-^test/etap/fixtures/*
++^test/couchdb/Makefile
++^test/couchdb/Makefile.in
++^test/couchdb/fixtures/logo.png
++^test/couchdb/fixtures/3b835456c235b1827e012e25666152f3.view
++^test/couchdb/fixtures/Makefile
++^test/couchdb/fixtures/Makefile.in
++^test/couchdb/fixtures/test.couch
++^test/couchdb/fixtures/.deps/test_cfg_register-test_cfg_register.Po
++^test/couchdb/fixtures/test_cfg_register
++^test/couchdb/fixtures/test_cfg_register.o
+ ^test/javascript/Makefile
+ ^test/javascript/Makefile.in
+ ^test/local.ini
+diff --git a/src/Makefile.am b/src/Makefile.am
+index dde8b52..66efc11 100644
+--- a/src/Makefile.am
++++ b/src/Makefile.am
+@@ -18,7 +18,6 @@ SUBDIRS = \
+     couch_replicator \
+     couchdb \
+     ejson \
+-    etap \
+     couchjs-node
+ 
+ EXTRA_DIST = \
+diff --git a/src/couch_mrview/Makefile.am b/src/couch_mrview/Makefile.am
+index 2b9ef86..b9abe28 100644
+--- a/src/couch_mrview/Makefile.am
++++ b/src/couch_mrview/Makefile.am
+@@ -33,13 +33,13 @@ source_files = \
+     src/couch_mrview_util.erl
+ 
+ test_files = \
+-    test/01-load.t \
+-    test/02-map-views.t \
+-    test/03-red-views.t \
+-    test/04-index-info.t \
+-    test/05-collation.t \
+-    test/06-all-docs.t \
+-	test/07-compact-swap.t
++    test/couch_mrview_all_docs_tests.erl \
++    test/couch_mrview_collation_tests.erl \
++    test/couch_mrview_compact_tests.erl \
++    test/couch_mrview_index_info_tests.erl \
++    test/couch_mrview_map_views_tests.erl \
++    test/couch_mrview_modules_load_tests.erl \
++    test/couch_mrview_red_views_tests.erl
+ 
+ compiled_files = \
+     ebin/couch_mrview.app \
+@@ -58,7 +58,7 @@ CLEANFILES = $(compiled_files)
+ 
+ check:
+ if TESTS
+-	$(abs_top_builddir)/test/etap/run $(abs_top_srcdir)/src/couch_mrview/test
++	$(abs_top_builddir)/test/couchdb/run -v $(abs_top_srcdir)/src/couch_mrview/test
+ endif
+ 
+ ebin/%.app: src/%.app.src
+diff --git a/src/couch_mrview/test/01-load.t b/src/couch_mrview/test/01-load.t
+deleted file mode 100644
+index a57c1a7..0000000
+--- a/src/couch_mrview/test/01-load.t
++++ /dev/null
+@@ -1,34 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-% Test that we can load each module.
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-    Modules = [
+-        couch_mrview,
+-        couch_mrview_compactor,
+-        couch_mrview_http,
+-        couch_mrview_index,
+-        couch_mrview_updater,
+-        couch_mrview_util
+-    ],
+-
+-    etap:plan(length(Modules)),
+-    lists:foreach(
+-        fun(Module) ->
+-            etap:loaded_ok(Module, lists:concat(["Loaded: ", Module]))
+-        end, Modules),
+-    etap:end_tests().
+diff --git a/src/couch_mrview/test/02-map-views.t b/src/couch_mrview/test/02-map-views.t
+deleted file mode 100644
+index 7e1ca0c..0000000
+--- a/src/couch_mrview/test/02-map-views.t
++++ /dev/null
+@@ -1,131 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(6),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    timer:sleep(300),
+-    ok.
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-
+-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
+-
+-    test_basic(Db),
+-    test_range(Db),
+-    test_rev_range(Db),
+-    test_limit_and_skip(Db),
+-    test_include_docs(Db),
+-    test_empty_view(Db),
+-
+-    ok.
+-
+-
+-test_basic(Db) ->
+-    Result = run_query(Db, []),
+-    Expect = {ok, [
+-        {meta, [{total, 10}, {offset, 0}]},
+-        {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+-        {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+-        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+-        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+-        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+-        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+-        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+-        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+-        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+-    ]},
+-    etap:is(Result, Expect, "Simple view query worked.").
+-
+-
+-test_range(Db) ->
+-    Result = run_query(Db, [{start_key, 3}, {end_key, 5}]),
+-    Expect = {ok, [
+-        {meta, [{total, 10}, {offset, 2}]},
+-        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+-        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+-    ]},
+-    etap:is(Result, Expect, "Query with range works.").
+-
+-
+-test_rev_range(Db) ->
+-    Result = run_query(Db, [
+-        {direction, rev},
+-        {start_key, 5}, {end_key, 3},
+-        {inclusive_end, true}
+-    ]),
+-    Expect = {ok, [
+-        {meta, [{total, 10}, {offset, 5}]},
+-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+-        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+-        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
+-    ]},
+-    etap:is(Result, Expect, "Query with reversed range works.").
+-
+-
+-test_limit_and_skip(Db) ->
+-    Result = run_query(Db, [
+-        {start_key, 2},
+-        {limit, 3},
+-        {skip, 3}
+-    ]),
+-    Expect = {ok, [
+-        {meta, [{total, 10}, {offset, 4}]},
+-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+-        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+-        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
+-    ]},
+-    etap:is(Result, Expect, "Query with limit and skip works.").
+-
+-
+-test_include_docs(Db) ->
+-    Result = run_query(Db, [
+-        {start_key, 8},
+-        {end_key, 8},
+-        {include_docs, true}
+-    ]),
+-    Doc = {[
+-        {<<"_id">>,<<"8">>},
+-        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+-        {<<"val">>,8}
+-    ]},
+-    Expect = {ok, [
+-        {meta, [{total, 10}, {offset, 7}]},
+-        {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
+-    ]},
+-    etap:is(Result, Expect, "Query with include docs works.").
+-
+-
+-test_empty_view(Db) ->
+-    Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
+-    Expect = {ok, [
+-        {meta, [{total, 0}, {offset, 0}]}
+-    ]},
+-    etap:is(Result, Expect, "Empty views are correct.").
+-
+-
+-run_query(Db, Opts) ->
+-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
+diff --git a/src/couch_mrview/test/03-red-views.t b/src/couch_mrview/test/03-red-views.t
+deleted file mode 100644
+index 6ad341b..0000000
+--- a/src/couch_mrview/test/03-red-views.t
++++ /dev/null
+@@ -1,78 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-main(_) ->
+-    test_util:run(4, fun() -> test() end).
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-
+-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, red),
+-
+-    test_basic(Db),
+-    test_key_range(Db),
+-    test_group_level(Db),
+-    test_group_exact(Db),
+-
+-    ok.
+-
+-
+-test_basic(Db) ->
+-    Result = run_query(Db, []),
+-    Expect = {ok, [
+-        {meta, []},
+-        {row, [{key, null}, {value, 55}]}
+-    ]},
+-    etap:is(Result, Expect, "Simple reduce view works.").
+-
+-
+-test_key_range(Db) ->
+-    Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
+-    Expect = {ok, [
+-        {meta, []},
+-        {row, [{key, null}, {value, 6}]}
+-    ]},
+-    etap:is(Result, Expect, "Reduce with key range works.").
+-
+-
+-test_group_level(Db) ->
+-    Result = run_query(Db, [{group_level, 1}]),
+-    Expect = {ok, [
+-        {meta, []},
+-        {row, [{key, [0]}, {value, 30}]},
+-        {row, [{key, [1]}, {value, 25}]}
+-    ]},
+-    etap:is(Result, Expect, "Group level works.").
+-
+-test_group_exact(Db) ->
+-    Result = run_query(Db, [{group_level, exact}]),
+-    Expect = {ok, [
+-        {meta, []},
+-        {row, [{key, [0, 2]}, {value, 2}]},
+-        {row, [{key, [0, 4]}, {value, 4}]},
+-        {row, [{key, [0, 6]}, {value, 6}]},
+-        {row, [{key, [0, 8]}, {value, 8}]},
+-        {row, [{key, [0, 10]}, {value, 10}]},
+-        {row, [{key, [1, 1]}, {value, 1}]},
+-        {row, [{key, [1, 3]}, {value, 3}]},
+-        {row, [{key, [1, 5]}, {value, 5}]},
+-        {row, [{key, [1, 7]}, {value, 7}]},
+-        {row, [{key, [1, 9]}, {value, 9}]}
+-    ]},
+-    etap:is(Result, Expect, "Group exact works.").
+-
+-
+-run_query(Db, Opts) ->
+-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
+diff --git a/src/couch_mrview/test/04-index-info.t b/src/couch_mrview/test/04-index-info.t
+deleted file mode 100644
+index 6b67b56..0000000
+--- a/src/couch_mrview/test/04-index-info.t
++++ /dev/null
+@@ -1,54 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(9),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    timer:sleep(300),
+-    ok.
+-
+-sig() -> <<"276df562b152b3c4e5d34024f62672ed">>.
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-
+-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
+-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
+-
+-    {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
+-
+-    etap:is(getval(signature, Info), sig(), "Signature is ok."),
+-    etap:is(getval(language, Info), <<"javascript">>, "Language is ok."),
+-    etap:is_greater(getval(disk_size, Info), 0, "Disk size is ok."),
+-    etap:is_greater(getval(data_size, Info), 0, "Data size is ok."),
+-    etap:is(getval(update_seq, Info), 11, "Update seq is ok."),
+-    etap:is(getval(purge_seq, Info), 0, "Purge seq is ok."),
+-    etap:is(getval(updater_running, Info), false, "No updater running."),
+-    etap:is(getval(compact_running, Info), false, "No compaction running."),
+-    etap:is(getval(waiting_clients, Info), 0, "No waiting clients."),
+-
+-    ok.
+-
+-getval(Key, PL) ->
+-    {value, {Key, Val}} = lists:keysearch(Key, 1, PL),
+-    Val.
+diff --git a/src/couch_mrview/test/05-collation.t b/src/couch_mrview/test/05-collation.t
+deleted file mode 100644
+index ac8f8bc..0000000
+--- a/src/couch_mrview/test/05-collation.t
++++ /dev/null
+@@ -1,163 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-main(_) ->
+-    test_util:run(9, fun() -> test() end).
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    {ok, Db0} = couch_mrview_test_util:new_db(<<"foo">>, map),
+-    {ok, Db1} = couch_mrview_test_util:save_docs(Db0, docs()),
+-
+-    test_collated_fwd(Db1),
+-    test_collated_rev(Db1),
+-    test_range_collation(Db1),
+-    test_inclusive_end(Db1),
+-    test_uninclusive_end(Db1),
+-    test_with_endkey_docid(Db1),
+-
+-    ok.
+-
+-test_collated_fwd(Db) ->
+-    {ok, Results} = run_query(Db, []),
+-    Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ rows(),
+-    etap:is(Results, Expect, "Values were collated correctly.").
+-
+-
+-test_collated_rev(Db) ->
+-    {ok, Results} = run_query(Db, [{direction, rev}]),
+-    Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ lists:reverse(rows()),
+-    etap:is(Results, Expect, "Values were collated correctly descending.").
+-
+-
+-test_range_collation(Db) ->
+-    {_, Error} = lists:foldl(fun(V, {Count, Error}) ->
+-        {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
+-        Id = list_to_binary(integer_to_list(Count)),
+-        Expect = [
+-            {meta, [{total, 26}, {offset, Count}]},
+-            {row, [{id, Id}, {key, V}, {value, 0}]}
+-        ],
+-        case Results == Expect of
+-            true -> {Count+1, Error};
+-            _ -> {Count+1, true}
+-        end
+-    end, {0, false}, vals()),
+-    etap:is(Error, false, "Found each individual key correctly.").
+-
+-
+-test_inclusive_end(Db) ->
+-    Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
+-    {ok, Rows0} = run_query(Db, Opts),
+-    LastRow0 = lists:last(Rows0),
+-    Expect0 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+-    etap:is(LastRow0, Expect0, "Inclusive end is correct."),
+-
+-    {ok, Rows1} = run_query(Db, Opts ++ [{direction, rev}]),
+-    LastRow1 = lists:last(Rows1),
+-    Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+-    etap:is(LastRow1, Expect1,
+-            "Inclusive end is correct with descending=true").
+-
+-test_uninclusive_end(Db) ->
+-    Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
+-    {ok, Rows0} = run_query(Db, Opts),
+-    LastRow0 = lists:last(Rows0),
+-    Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
+-    etap:is(LastRow0, Expect0, "Uninclusive end is correct."),
+-
+-    {ok, Rows1} = run_query(Db, Opts ++ [{direction, rev}]),
+-    LastRow1 = lists:last(Rows1),
+-    Expect1 = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]},
+-    etap:is(LastRow1, Expect1,
+-            "Uninclusive end is correct with descending=true").
+-
+-
+-test_with_endkey_docid(Db) ->
+-    {ok, Rows0} = run_query(Db, [
+-        {end_key, <<"b">>}, {end_key_docid, <<"10">>},
+-        {inclusive_end, false}
+-    ]),
+-    Result0 = lists:last(Rows0),
+-    Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
+-    etap:is(Result0, Expect0, "Uninclsuive end with endkey_docid set is ok."),
+-
+-    {ok, Rows1} = run_query(Db, [
+-        {end_key, <<"b">>}, {end_key_docid, <<"11">>},
+-        {inclusive_end, false}
+-    ]),
+-    Result1 = lists:last(Rows1),
+-    Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+-    etap:is(Result1, Expect1, "Uninclsuive end with endkey_docid set is ok.").
+-
+-
+-run_query(Db, Opts) ->
+-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts).
+-
+-
+-docs() ->
+-    {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) ->
+-        Doc = couch_doc:from_json_obj({[
+-            {<<"_id">>, list_to_binary(integer_to_list(Count))},
+-            {<<"foo">>, V}
+-        ]}),
+-        {[Doc | Docs0], Count+1}
+-    end, {[], 0}, vals()),
+-    Docs.
+-
+-
+-rows() ->
+-    {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) ->
+-        Id = list_to_binary(integer_to_list(Count)),
+-        Row = {row, [{id, Id}, {key, V}, {value, 0}]},
+-        {[Row | Rows0], Count+1}
+-    end, {[], 0}, vals()),
+-    lists:reverse(Rows).
+-
+-
+-vals() ->
+-    [
+-        null,
+-        false,
+-        true,
+-
+-        1,
+-        2,
+-        3.0,
+-        4,
+-
+-        <<"a">>,
+-        <<"A">>,
+-        <<"aa">>,
+-        <<"b">>,
+-        <<"B">>,
+-        <<"ba">>,
+-        <<"bb">>,
+-
+-        [<<"a">>],
+-        [<<"b">>],
+-        [<<"b">>, <<"c">>],
+-        [<<"b">>, <<"c">>, <<"a">>],
+-        [<<"b">>, <<"d">>],
+-        [<<"b">>, <<"d">>, <<"e">>],
+-
+-        {[{<<"a">>, 1}]},
+-        {[{<<"a">>, 2}]},
+-        {[{<<"b">>, 1}]},
+-        {[{<<"b">>, 2}]},
+-        {[{<<"b">>, 2}, {<<"a">>, 1}]},
+-        {[{<<"b">>, 2}, {<<"c">>, 2}]}
+-    ].
+diff --git a/src/couch_mrview/test/06-all-docs.t b/src/couch_mrview/test/06-all-docs.t
+deleted file mode 100644
+index 4501aa5..0000000
+--- a/src/couch_mrview/test/06-all-docs.t
++++ /dev/null
+@@ -1,127 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-main(_) ->
+-    test_util:run(6, fun() -> test() end).
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-
+-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
+-
+-    test_basic(Db),
+-    test_range(Db),
+-    test_rev_range(Db),
+-    test_limit_and_skip(Db),
+-    test_include_docs(Db),
+-    test_empty_view(Db),
+-
+-    ok.
+-
+-
+-test_basic(Db) ->
+-    Result = run_query(Db, []),
+-    Expect = {ok, [
+-        {meta, [{total, 11}, {offset, 0}]},
+-        mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>),
+-        mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>),
+-        mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>),
+-        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
+-        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
+-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
+-        mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
+-        mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>),
+-        mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>),
+-        mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
+-        mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
+-    ]},
+-    etap:is(Result, Expect, "Simple view query worked.").
+-
+-
+-test_range(Db) ->
+-    Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]),
+-    Expect = {ok, [
+-        {meta, [{total, 11}, {offset, 3}]},
+-        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
+-        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
+-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
+-    ]},
+-    etap:is(Result, Expect, "Query with range works.").
+-
+-
+-test_rev_range(Db) ->
+-    Result = run_query(Db, [
+-        {direction, rev},
+-        {start_key, <<"5">>}, {end_key, <<"3">>},
+-        {inclusive_end, true}
+-    ]),
+-    Expect = {ok, [
+-        {meta, [{total, 11}, {offset, 5}]},
+-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
+-        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
+-        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
+-    ]},
+-    etap:is(Result, Expect, "Query with reversed range works.").
+-
+-
+-test_limit_and_skip(Db) ->
+-    Result = run_query(Db, [
+-        {start_key, <<"2">>},
+-        {limit, 3},
+-        {skip, 3}
+-    ]),
+-    Expect = {ok, [
+-        {meta, [{total, 11}, {offset, 5}]},
+-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
+-        mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
+-        mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
+-    ]},
+-    etap:is(Result, Expect, "Query with limit and skip works.").
+-
+-
+-test_include_docs(Db) ->
+-    Result = run_query(Db, [
+-        {start_key, <<"8">>},
+-        {end_key, <<"8">>},
+-        {include_docs, true}
+-    ]),
+-    Doc = {[
+-        {<<"_id">>,<<"8">>},
+-        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+-        {<<"val">>, 8}
+-    ]},
+-    Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]},
+-    Expect = {ok, [
+-        {meta, [{total, 11}, {offset, 8}]},
+-        {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
+-    ]},
+-    etap:is(Result, Expect, "Query with include docs works.").
+-
+-
+-test_empty_view(Db) ->
+-    Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
+-    Expect = {ok, [
+-        {meta, [{total, 0}, {offset, 0}]}
+-    ]},
+-    etap:is(Result, Expect, "Empty views are correct.").
+-
+-
+-mk_row(Id, Rev) ->
+-    {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
+-
+-
+-run_query(Db, Opts) ->
+-    couch_mrview:query_all_docs(Db, Opts).
+diff --git a/src/couch_mrview/test/07-compact-swap.t b/src/couch_mrview/test/07-compact-swap.t
+deleted file mode 100644
+index 4bfe124..0000000
+--- a/src/couch_mrview/test/07-compact-swap.t
++++ /dev/null
+@@ -1,57 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-main(_) ->
+-    test_util:run(1, fun() -> test() end).
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map, 1000),
+-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
+-    test_swap(Db),
+-    ok.
+-
+-
+-test_swap(Db) ->
+-    {ok, QPid} = start_query(Db),    
+-    {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]),
+-    receive
+-        {'DOWN', MonRef, process, _, _} -> ok
+-    after 1000 ->
+-        throw(compaction_failed)
+-    end,
+-    QPid ! {self(), continue},
+-    receive
+-        {QPid, Count} ->
+-            etap:is(Count, 1000, "View finished successfully.")
+-    after 1000 ->
+-        throw("query failed")
+-    end.
+-
+-
+-start_query(Db) ->
+-    Self = self(),
+-    Pid = spawn(fun() ->
+-        CB = fun
+-            (_, wait) -> receive {Self, continue} -> {ok, 0} end;
+-            ({row, _}, Count) -> {ok, Count+1};
+-            (_, Count) -> {ok, Count}
+-        end,
+-        {ok, Result} = 
+-        couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
+-        Self ! {self(), Result}
+-    end),
+-    {ok, Pid}.
+diff --git a/src/couch_mrview/test/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/couch_mrview_all_docs_tests.erl
+new file mode 100644
+index 0000000..4e098ff
+--- /dev/null
++++ b/src/couch_mrview/test/couch_mrview_all_docs_tests.erl
+@@ -0,0 +1,154 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_mrview_all_docs_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(TIMEOUT, 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
++    Db.
++
++teardown(Db) ->
++    couch_db:close(Db),
++    couch_server:delete(Db#db.name, [?ADMIN_USER]),
++    ok.
++
++
++all_docs_test_() ->
++    {
++        "_all_docs view tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_query/1,
++                    fun should_query_with_range/1,
++                    fun should_query_with_range_rev/1,
++                    fun should_query_with_limit_and_skip/1,
++                    fun should_query_with_include_docs/1,
++                    fun should_query_empty_views/1
++                ]
++            }
++        }
++    }.
++
++
++should_query(Db) ->
++    Result = run_query(Db, []),
++    Expect = {ok, [
++        {meta, [{total, 11}, {offset, 0}]},
++        mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>),
++        mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>),
++        mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>),
++        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
++        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
++        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
++        mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
++        mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>),
++        mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>),
++        mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
++        mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_query_with_range(Db) ->
++    Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]),
++    Expect = {ok, [
++        {meta, [{total, 11}, {offset, 3}]},
++        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
++        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
++        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_query_with_range_rev(Db) ->
++    Result = run_query(Db, [
++        {direction, rev},
++        {start_key, <<"5">>}, {end_key, <<"3">>},
++        {inclusive_end, true}
++    ]),
++    Expect = {ok, [
++        {meta, [{total, 11}, {offset, 5}]},
++        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
++        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
++        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_query_with_limit_and_skip(Db) ->
++    Result = run_query(Db, [
++        {start_key, <<"2">>},
++        {limit, 3},
++        {skip, 3}
++    ]),
++    Expect = {ok, [
++        {meta, [{total, 11}, {offset, 5}]},
++        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
++        mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
++        mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_query_with_include_docs(Db) ->
++    Result = run_query(Db, [
++        {start_key, <<"8">>},
++        {end_key, <<"8">>},
++        {include_docs, true}
++    ]),
++    Doc = {[
++        {<<"_id">>,<<"8">>},
++        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
++        {<<"val">>, 8}
++    ]},
++    Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]},
++    Expect = {ok, [
++        {meta, [{total, 11}, {offset, 8}]},
++        {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_query_empty_views(Db) ->
++    Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
++    Expect = {ok, [
++        {meta, [{total, 0}, {offset, 0}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++
++mk_row(Id, Rev) ->
++    {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
++
++run_query(Db, Opts) ->
++    couch_mrview:query_all_docs(Db, Opts).
+diff --git a/src/couch_mrview/test/couch_mrview_collation_tests.erl b/src/couch_mrview/test/couch_mrview_collation_tests.erl
+new file mode 100644
+index 0000000..2e0b75b
+--- /dev/null
++++ b/src/couch_mrview/test/couch_mrview_collation_tests.erl
+@@ -0,0 +1,202 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_mrview_collation_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(TIMEOUT, 1000).
++-define(VALUES, [
++    null,
++    false,
++    true,
++
++    1,
++    2,
++    3.0,
++    4,
++
++    <<"a">>,
++    <<"A">>,
++    <<"aa">>,
++    <<"b">>,
++    <<"B">>,
++    <<"ba">>,
++    <<"bb">>,
++
++    [<<"a">>],
++    [<<"b">>],
++    [<<"b">>, <<"c">>],
++    [<<"b">>, <<"c">>, <<"a">>],
++    [<<"b">>, <<"d">>],
++    [<<"b">>, <<"d">>, <<"e">>],
++
++    {[{<<"a">>, 1}]},
++    {[{<<"a">>, 2}]},
++    {[{<<"b">>, 1}]},
++    {[{<<"b">>, 2}]},
++    {[{<<"b">>, 2}, {<<"a">>, 1}]},
++    {[{<<"b">>, 2}, {<<"c">>, 2}]}
++]).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    {ok, Db1} = couch_mrview_test_util:new_db(?tempdb(), map),
++    {ok, Db2} = couch_mrview_test_util:save_docs(Db1, make_docs()),
++    Db2.
++
++teardown(Db) ->
++    couch_db:close(Db),
++    couch_server:delete(Db#db.name, [?ADMIN_USER]),
++    ok.
++
++
++collation_test_() ->
++    {
++        "Collation tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_collate_fwd/1,
++                    fun should_collate_rev/1,
++                    fun should_collate_range/1,
++                    fun should_collate_with_inclusive_end_fwd/1,
++                    fun should_collate_with_inclusive_end_rev/1,
++                    fun should_collate_without_inclusive_end_fwd/1,
++                    fun should_collate_without_inclusive_end_rev/1,
++                    fun should_collate_with_endkey_docid/1
++                ]
++            }
++        }
++    }.
++
++
++should_collate_fwd(Db) ->
++    {ok, Results} = run_query(Db, []),
++    Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ rows(),
++    %% cannot use ?_assertEqual/2 here: mrview converts the
++    %% value 3.0 to 3, which would make the assertion fail
++    ?_assert(Expect == Results).
++
++should_collate_rev(Db) ->
++    {ok, Results} = run_query(Db, [{direction, rev}]),
++    Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ lists:reverse(rows()),
++    %% cannot use ?_assertEqual/2 here: mrview converts the
++    %% value 3.0 to 3, which would make the assertion fail
++    ?_assert(Expect == Results).
++
++should_collate_range(Db) ->
++    ?_assertNot(
++        begin
++            {_, Error} = lists:foldl(fun(V, {Count, Error}) ->
++                {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
++                Id = list_to_binary(integer_to_list(Count)),
++                Expect = [
++                    {meta, [{total, 26}, {offset, Count}]},
++                    {row, [{id, Id}, {key, V}, {value, 0}]}
++                ],
++                case Results == Expect of
++                    true -> {Count+1, Error};
++                    _ -> {Count+1, true}
++                end
++            end, {0, false}, ?VALUES),
++            Error
++        end).
++
++should_collate_with_inclusive_end_fwd(Db) ->
++    Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
++    {ok, Rows0} = run_query(Db, Opts),
++    LastRow = lists:last(Rows0),
++    Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
++    ?_assertEqual(Expect, LastRow).
++
++should_collate_with_inclusive_end_rev(Db) ->
++    Opts = [{end_key, <<"b">>}, {inclusive_end, true}, {direction, rev}],
++    {ok, Rows} = run_query(Db, Opts),
++    LastRow = lists:last(Rows),
++    Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
++    ?_assertEqual(Expect, LastRow).
++
++should_collate_without_inclusive_end_fwd(Db) ->
++    Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
++    {ok, Rows0} = run_query(Db, Opts),
++    LastRow = lists:last(Rows0),
++    Expect = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
++    ?_assertEqual(Expect, LastRow).
++
++should_collate_without_inclusive_end_rev(Db) ->
++    Opts = [{end_key, <<"b">>}, {inclusive_end, false}, {direction, rev}],
++    {ok, Rows} = run_query(Db, Opts),
++    LastRow = lists:last(Rows),
++    Expect = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]},
++    ?_assertEqual(Expect, LastRow).
++
++should_collate_with_endkey_docid(Db) ->
++    ?_test(begin
++        {ok, Rows0} = run_query(Db, [
++            {end_key, <<"b">>}, {end_key_docid, <<"10">>},
++            {inclusive_end, false}
++        ]),
++        Result0 = lists:last(Rows0),
++        Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
++        ?assertEqual(Expect0, Result0),
++
++        {ok, Rows1} = run_query(Db, [
++            {end_key, <<"b">>}, {end_key_docid, <<"11">>},
++            {inclusive_end, false}
++        ]),
++        Result1 = lists:last(Rows1),
++        Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
++        ?assertEqual(Expect1, Result1)
++    end).
++
++
++make_docs() ->
++    {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) ->
++        Doc = couch_doc:from_json_obj({[
++            {<<"_id">>, list_to_binary(integer_to_list(Count))},
++            {<<"foo">>, V}
++        ]}),
++        {[Doc | Docs0], Count+1}
++    end, {[], 0}, ?VALUES),
++    Docs.
++
++rows() ->
++    {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) ->
++        Id = list_to_binary(integer_to_list(Count)),
++        Row = {row, [{id, Id}, {key, V}, {value, 0}]},
++        {[Row | Rows0], Count+1}
++    end, {[], 0}, ?VALUES),
++    lists:reverse(Rows).
++
++run_query(Db, Opts) ->
++    couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts).
+diff --git a/src/couch_mrview/test/couch_mrview_compact_tests.erl b/src/couch_mrview/test/couch_mrview_compact_tests.erl
+new file mode 100644
+index 0000000..4cb7daf
+--- /dev/null
++++ b/src/couch_mrview/test/couch_mrview_compact_tests.erl
+@@ -0,0 +1,101 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_mrview_compact_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(TIMEOUT, 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 1000),
++    Db.
++
++teardown(Db) ->
++    couch_db:close(Db),
++    couch_server:delete(Db#db.name, [?ADMIN_USER]),
++    ok.
++
++
++compaction_test_() ->
++    {
++        "Compaction tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_swap/1
++                ]
++            }
++        }
++    }.
++
++
++should_swap(Db) ->
++    ?_test(begin
++        couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
++        {ok, QPid} = start_query(Db),
++        {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]),
++        receive
++            {'DOWN', MonRef, process, _, _} -> ok
++        after ?TIMEOUT ->
++            erlang:error(
++                {assertion_failed,
++                 [{module, ?MODULE}, {line, ?LINE},
++                  {reason, "compaction failed"}]})
++        end,
++        QPid ! {self(), continue},
++        receive
++            {QPid, Count} ->
++                ?assertEqual(1000, Count)
++        after ?TIMEOUT ->
++            erlang:error(
++                {assertion_failed,
++                 [{module, ?MODULE}, {line, ?LINE},
++                  {reason, "query failed"}]})
++        end
++    end).
++
++
++start_query(Db) ->
++    Self = self(),
++    Pid = spawn(fun() ->
++        CB = fun
++            (_, wait) -> receive {Self, continue} -> {ok, 0} end;
++            ({row, _}, Count) -> {ok, Count+1};
++            (_, Count) -> {ok, Count}
++        end,
++        {ok, Result} =
++        couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
++        Self ! {self(), Result}
++    end),
++    {ok, Pid}.
+diff --git a/src/couch_mrview/test/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
+new file mode 100644
+index 0000000..6c30da8
+--- /dev/null
++++ b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
+@@ -0,0 +1,87 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_mrview_index_info_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(TIMEOUT, 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
++    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
++    {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
++    {Db, Info}.
++
++teardown({Db, _}) ->
++    couch_db:close(Db),
++    couch_server:delete(Db#db.name, [?ADMIN_USER]),
++    ok.
++
++
++view_info_test_() ->
++    {
++        "Views index tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_get_property/1
++                ]
++            }
++        }
++    }.
++
++
++should_get_property({_, Info}) ->
++    InfoProps = [
++        {signature, <<"276df562b152b3c4e5d34024f62672ed">>},
++        {language, <<"javascript">>},
++        {disk_size, 314},
++        {data_size, 263},
++        {update_seq, 11},
++        {purge_seq, 0},
++        {updater_running, false},
++        {compact_running, false},
++        {waiting_clients, 0}
++    ],
++    [
++        {atom_to_list(Key), ?_assertEqual(Val, getval(Key, Info))}
++        || {Key, Val} <- InfoProps
++    ].
++
++
++getval(Key, PL) ->
++    {value, {Key, Val}} = lists:keysearch(Key, 1, PL),
++    Val.
++
++
+diff --git a/src/couch_mrview/test/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/couch_mrview_map_views_tests.erl
+new file mode 100644
+index 0000000..b364b77
+--- /dev/null
++++ b/src/couch_mrview/test/couch_mrview_map_views_tests.erl
+@@ -0,0 +1,138 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_mrview_map_views_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(TIMEOUT, 1000).
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
++    Db.
++
++teardown(Db) ->
++    couch_db:close(Db),
++    couch_server:delete(Db#db.name, [?ADMIN_USER]),
++    ok.
++
++
++map_views_test_() ->
++    {
++        "Map views",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_map/1,
++                    fun should_map_with_range/1,
++                    fun should_map_with_limit_and_skip/1,
++                    fun should_map_with_include_docs/1,
++                    fun should_map_empty_views/1
++                ]
++            }
++        }
++    }.
++
++
++should_map(Db) ->
++    Result = run_query(Db, []),
++    Expect = {ok, [
++        {meta, [{total, 10}, {offset, 0}]},
++        {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
++        {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
++        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
++        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
++        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
++        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
++        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
++        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
++        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
++        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_map_with_range(Db) ->
++    Result = run_query(Db, [
++        {direction, rev},
++        {start_key, 5}, {end_key, 3},
++        {inclusive_end, true}
++    ]),
++    Expect = {ok, [
++        {meta, [{total, 10}, {offset, 5}]},
++        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
++        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
++        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_map_with_limit_and_skip(Db) ->
++    Result = run_query(Db, [
++        {start_key, 2},
++        {limit, 3},
++        {skip, 3}
++    ]),
++    Expect = {ok, [
++        {meta, [{total, 10}, {offset, 4}]},
++        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
++        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
++        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_map_with_include_docs(Db) ->
++    Result = run_query(Db, [
++        {start_key, 8},
++        {end_key, 8},
++        {include_docs, true}
++    ]),
++    Doc = {[
++        {<<"_id">>,<<"8">>},
++        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
++        {<<"val">>,8}
++    ]},
++    Expect = {ok, [
++        {meta, [{total, 10}, {offset, 7}]},
++        {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_map_empty_views(Db) ->
++    Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
++    Expect = {ok, [
++        {meta, [{total, 0}, {offset, 0}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++
++run_query(Db, Opts) ->
++    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
+diff --git a/src/couch_mrview/test/couch_mrview_modules_load_tests.erl b/src/couch_mrview/test/couch_mrview_modules_load_tests.erl
+new file mode 100644
+index 0000000..bfab646
+--- /dev/null
++++ b/src/couch_mrview/test/couch_mrview_modules_load_tests.erl
+@@ -0,0 +1,37 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_mrview_modules_load_tests).
++
++-include("couch_eunit.hrl").
++
++
++modules_load_test_() ->
++    {
++        "Verify that all modules load",
++        should_load_modules()
++    }.
++
++
++should_load_modules() ->
++    Modules = [
++        couch_mrview,
++        couch_mrview_compactor,
++        couch_mrview_http,
++        couch_mrview_index,
++        couch_mrview_updater,
++        couch_mrview_util
++    ],
++    [should_load_module(Mod) || Mod <- Modules].
++
++should_load_module(Mod) ->
++    {atom_to_list(Mod), ?_assertMatch({module, _}, code:load_file(Mod))}.
+diff --git a/src/couch_mrview/test/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/couch_mrview_red_views_tests.erl
+new file mode 100644
+index 0000000..ed6018b
+--- /dev/null
++++ b/src/couch_mrview/test/couch_mrview_red_views_tests.erl
+@@ -0,0 +1,110 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_mrview_red_views_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(TIMEOUT, 1000).
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), red),
++    Db.
++
++teardown(Db) ->
++    couch_db:close(Db),
++    couch_server:delete(Db#db.name, [?ADMIN_USER]),
++    ok.
++
++
++reduce_views_test_() ->
++    {
++        "Reduce views",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_reduce_basic/1,
++                    fun should_reduce_key_range/1,
++                    fun should_reduce_with_group_level/1,
++                    fun should_reduce_with_group_exact/1
++                ]
++            }
++        }
++    }.
++
++
++should_reduce_basic(Db) ->
++    Result = run_query(Db, []),
++    Expect = {ok, [
++        {meta, []},
++        {row, [{key, null}, {value, 55}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_reduce_key_range(Db) ->
++    Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
++    Expect = {ok, [
++        {meta, []},
++        {row, [{key, null}, {value, 6}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_reduce_with_group_level(Db) ->
++    Result = run_query(Db, [{group_level, 1}]),
++    Expect = {ok, [
++        {meta, []},
++        {row, [{key, [0]}, {value, 30}]},
++        {row, [{key, [1]}, {value, 25}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++should_reduce_with_group_exact(Db) ->
++    Result = run_query(Db, [{group_level, exact}]),
++    Expect = {ok, [
++        {meta, []},
++        {row, [{key, [0, 2]}, {value, 2}]},
++        {row, [{key, [0, 4]}, {value, 4}]},
++        {row, [{key, [0, 6]}, {value, 6}]},
++        {row, [{key, [0, 8]}, {value, 8}]},
++        {row, [{key, [0, 10]}, {value, 10}]},
++        {row, [{key, [1, 1]}, {value, 1}]},
++        {row, [{key, [1, 3]}, {value, 3}]},
++        {row, [{key, [1, 5]}, {value, 5}]},
++        {row, [{key, [1, 7]}, {value, 7}]},
++        {row, [{key, [1, 9]}, {value, 9}]}
++    ]},
++    ?_assertEqual(Expect, Result).
++
++
++run_query(Db, Opts) ->
++    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
+diff --git a/src/couch_replicator/Makefile.am b/src/couch_replicator/Makefile.am
+index 2dcd47d..67c9872 100644
+--- a/src/couch_replicator/Makefile.am
++++ b/src/couch_replicator/Makefile.am
+@@ -36,13 +36,13 @@ source_files = \
+ 	src/couch_replicator.erl
+ 
+ test_files = \
+-	test/01-load.t \
+-	test/02-httpc-pool.t \
+-	test/03-replication-compact.t \
+-	test/04-replication-large-atts.t \
+-	test/05-replication-many-leaves.t \
+-	test/06-doc-missing-stubs.t \
+-	test/07-use-checkpoints.t
++	test/couch_replicator_compact_tests.erl \
++	test/couch_replicator_httpc_pool_tests.erl \
++	test/couch_replicator_large_atts_tests.erl \
++	test/couch_replicator_many_leaves_tests.erl \
++	test/couch_replicator_missing_stubs_tests.erl \
++	test/couch_replicator_modules_load_tests.erl \
++	test/couch_replicator_use_checkpoints_tests.erl
+ 
+ compiled_files = \
+ 	ebin/couch_replicator_api_wrap.beam \
+@@ -62,7 +62,7 @@ CLEANFILES = $(compiled_files)
+ 
+ check:
+ if TESTS
+-	$(abs_top_builddir)/test/etap/run $(abs_top_srcdir)/src/couch_replicator/test
++	$(abs_top_builddir)/test/couchdb/run -v $(abs_top_srcdir)/src/couch_replicator/test
+ endif
+ 
+ ebin/%.app: src/%.app.src
+diff --git a/src/couch_replicator/test/01-load.t b/src/couch_replicator/test/01-load.t
+deleted file mode 100644
+index 8bd82dd..0000000
+--- a/src/couch_replicator/test/01-load.t
++++ /dev/null
+@@ -1,37 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-% Test that we can load each module.
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-    Modules = [
+-        couch_replicator_api_wrap,
+-        couch_replicator_httpc,
+-        couch_replicator_httpd,
+-        couch_replicator_manager,
+-        couch_replicator_notifier,
+-        couch_replicator,
+-        couch_replicator_worker,
+-        couch_replicator_utils,
+-        couch_replicator_job_sup
+-    ],
+-
+-    etap:plan(length(Modules)),
+-    lists:foreach(
+-        fun(Module) ->
+-            etap:loaded_ok(Module, lists:concat(["Loaded: ", Module]))
+-        end, Modules),
+-    etap:end_tests().
+diff --git a/src/couch_replicator/test/02-httpc-pool.t b/src/couch_replicator/test/02-httpc-pool.t
+deleted file mode 100755
+index a7bde6c..0000000
+--- a/src/couch_replicator/test/02-httpc-pool.t
++++ /dev/null
+@@ -1,250 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(55),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    ok.
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    ibrowse:start(),
+-
+-    test_pool_full(),
+-    test_worker_dead_pool_non_full(),
+-    test_worker_dead_pool_full(),
+-
+-    couch_server_sup:stop(),
+-    ok.
+-
+-
+-test_pool_full() ->
+-    Pool = spawn_pool(),
+-    Client1 = spawn_client(Pool),
+-    Client2 = spawn_client(Pool),
+-    Client3 = spawn_client(Pool),
+-
+-    etap:diag("Check that we can spawn the max number of connections."),
+-    etap:is(ping_client(Client1), ok, "Client 1 started ok."),
+-    etap:is(ping_client(Client2), ok, "Client 2 started ok."),
+-    etap:is(ping_client(Client3), ok, "Client 3 started ok."),
+-
+-    Worker1 = get_client_worker(Client1, "1"),
+-    Worker2 = get_client_worker(Client2, "2"),
+-    Worker3 = get_client_worker(Client3, "3"),
+-    etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
+-    etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
+-    etap:is(is_process_alive(Worker3), true, "Client's 3 worker is alive."),
+-
+-    etap:isnt(Worker1, Worker2, "Clients 1 and 2 got different workers."),
+-    etap:isnt(Worker2, Worker3, "Clients 2 and 3 got different workers."),
+-    etap:isnt(Worker1, Worker3, "Clients 1 and 3 got different workers."),
+-
+-    etap:diag("Check that client 4 blocks waiting for a worker."),
+-    Client4 = spawn_client(Pool),
+-    etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."),
+-
+-    etap:diag("Check that stopping a client gives up its worker."),
+-    etap:is(stop_client(Client1), ok, "First client stopped."),
+-
+-    etap:diag("And check that our blocked client has been unblocked."),
+-    etap:is(ping_client(Client4), ok, "Client 4 was unblocked."),
+-
+-    Worker4 = get_client_worker(Client4, "4"),
+-    etap:is(is_process_alive(Worker4), true, "Client's 4 worker is alive."),
+-    etap:is(Worker4, Worker1, "Client 4 got worker that client 1 got before."),
+-
+-    lists:foreach(fun(C) -> ok = stop_client(C) end, [Client2, Client3, Client4]),
+-    stop_pool(Pool).
+-
+-
+-test_worker_dead_pool_non_full() ->
+-    Pool = spawn_pool(),
+-    Client1 = spawn_client(Pool),
+-
+-    etap:is(ping_client(Client1), ok, "Client 1 started ok."),
+-    Worker1 = get_client_worker(Client1, "1"),
+-    etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
+-
+-    etap:diag("Kill client's 1 worker."),
+-    etap:is(kill_client_worker(Client1), ok, "Killed client's 1 worker."),
+-    etap:is(is_process_alive(Worker1), false, "Client's 1 worker process is dead."),
+-
+-    etap:is(stop_client(Client1), ok, "First client stopped and released its worker."),
+-
+-    Client2 = spawn_client(Pool),
+-    etap:is(ping_client(Client2), ok, "Client 2 started ok."),
+-    Worker2 = get_client_worker(Client2, "2"),
+-    etap:isnt(Worker2, Worker1, "Client 2 got a different worker from client 1"),
+-    etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
+-
+-    etap:is(stop_client(Client2), ok, "Second client stopped."),
+-    stop_pool(Pool).
+-
+-
+-test_worker_dead_pool_full() ->
+-    Pool = spawn_pool(),
+-    Client1 = spawn_client(Pool),
+-    Client2 = spawn_client(Pool),
+-    Client3 = spawn_client(Pool),
+-
+-    etap:diag("Check that we can spawn the max number of connections."),
+-    etap:is(ping_client(Client1), ok, "Client 1 started ok."),
+-    etap:is(ping_client(Client2), ok, "Client 2 started ok."),
+-    etap:is(ping_client(Client3), ok, "Client 3 started ok."),
+-
+-    Worker1 = get_client_worker(Client1, "1"),
+-    Worker2 = get_client_worker(Client2, "2"),
+-    Worker3 = get_client_worker(Client3, "3"),
+-    etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
+-    etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
+-    etap:is(is_process_alive(Worker3), true, "Client's 3 worker is alive."),
+-
+-    etap:isnt(Worker1, Worker2, "Clients 1 and 2 got different workers."),
+-    etap:isnt(Worker2, Worker3, "Clients 2 and 3 got different workers."),
+-    etap:isnt(Worker1, Worker3, "Clients 1 and 3 got different workers."),
+-
+-    etap:diag("Check that client 4 blocks waiting for a worker."),
+-    Client4 = spawn_client(Pool),
+-    etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."),
+-
+-    etap:diag("Kill client's 1 worker."),
+-    etap:is(kill_client_worker(Client1), ok, "Killed client's 1 worker."),
+-    etap:is(is_process_alive(Worker1), false, "Client's 1 worker process is dead."),
+-
+-    etap:diag("Check client 4 got unblocked after first worker's death"),
+-    etap:is(ping_client(Client4), ok, "Client 4 not blocked anymore."),
+-
+-    Worker4 = get_client_worker(Client4, "4"),
+-    etap:is(is_process_alive(Worker4), true, "Client's 4 worker is alive."),
+-    etap:isnt(Worker4, Worker1, "Client 4 got a worker different from client 1."),
+-    etap:isnt(Worker4, Worker2, "Client 4 got a worker different from client 2."),
+-    etap:isnt(Worker4, Worker3, "Client 4 got a worker different from client 3."),
+-
+-    etap:diag("Check that stopping client 1 is a noop."),
+-    etap:is(stop_client(Client1), ok, "First client stopped."),
+-
+-    etap:is(is_process_alive(Worker2), true, "Client's 2 worker still alive."),
+-    etap:is(is_process_alive(Worker3), true, "Client's 3 worker still alive."),
+-    etap:is(is_process_alive(Worker4), true, "Client's 4 worker still alive."),
+-
+-    etap:diag("Check that client 5 blocks waiting for a worker."),
+-    Client5 = spawn_client(Pool),
+-    etap:is(ping_client(Client5), timeout, "Client 5 blocked while waiting."),
+-
+-    etap:diag("Check that stopping client 2 gives up its worker."),
+-    etap:is(stop_client(Client2), ok, "Second client stopped."),
+-
+-    etap:diag("Now check that client 5 has been unblocked."),
+-    etap:is(ping_client(Client5), ok, "Client 5 was unblocked."),
+-
+-    Worker5 = get_client_worker(Client5, "5"),
+-    etap:is(is_process_alive(Worker5), true, "Client's 5 worker is alive."),
+-    etap:isnt(Worker5, Worker1, "Client 5 got a worker different from client 1."),
+-    etap:is(Worker5, Worker2, "Client 5 got same worker as client 2."),
+-    etap:isnt(Worker5, Worker3, "Client 5 got a worker different from client 3."),
+-    etap:isnt(Worker5, Worker4, "Client 5 got a worker different from client 4."),
+-
+-    etap:is(is_process_alive(Worker3), true, "Client's 3 worker still alive."),
+-    etap:is(is_process_alive(Worker4), true, "Client's 4 worker still alive."),
+-    etap:is(is_process_alive(Worker5), true, "Client's 5 worker still alive."),
+-
+-    lists:foreach(fun(C) -> ok = stop_client(C) end, [Client3, Client4, Client5]),
+-    stop_pool(Pool).
+-
+-
+-spawn_client(Pool) ->
+-    Parent = self(),
+-    Ref = make_ref(),
+-    Pid = spawn(fun() ->
+-        {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
+-        loop(Parent, Ref, Worker, Pool)
+-    end),
+-    {Pid, Ref}.
+-
+-
+-ping_client({Pid, Ref}) ->
+-    Pid ! ping,
+-    receive
+-        {pong, Ref} ->
+-            ok
+-    after 3000 ->
+-        timeout
+-    end.
+-
+-
+-get_client_worker({Pid, Ref}, ClientName) ->
+-    Pid ! get_worker,
+-    receive
+-        {worker, Ref, Worker} ->
+-            Worker
+-    after 3000 ->
+-        etap:bail("Timeout getting client " ++ ClientName ++ " worker.")
+-    end.
+-
+-
+-stop_client({Pid, Ref}) ->
+-    Pid ! stop,
+-    receive
+-        {stop, Ref} ->
+-            ok
+-    after 3000 ->
+-        timeout
+-    end.
+-
+-
+-kill_client_worker({Pid, Ref}) ->
+-    Pid ! get_worker,
+-    receive
+-        {worker, Ref, Worker} ->
+-            exit(Worker, kill),
+-            ok
+-    after 3000 ->
+-        timeout
+-    end.
+-
+-
+-loop(Parent, Ref, Worker, Pool) ->
+-    receive
+-        ping ->
+-            Parent ! {pong, Ref},
+-            loop(Parent, Ref, Worker, Pool);
+-        get_worker  ->
+-            Parent ! {worker, Ref, Worker},
+-            loop(Parent, Ref, Worker, Pool);
+-        stop ->
+-            couch_replicator_httpc_pool:release_worker(Pool, Worker),
+-            Parent ! {stop, Ref}
+-    end.
+-
+-
+-spawn_pool() ->
+-    Host = couch_config:get("httpd", "bind_address", "127.0.0.1"),
+-    Port = couch_config:get("httpd", "port", "5984"),
+-    {ok, Pool} = couch_replicator_httpc_pool:start_link(
+-        "http://" ++ Host ++ ":5984", [{max_connections, 3}]),
+-    Pool.
+-
+-
+-stop_pool(Pool) ->
+-    ok = couch_replicator_httpc_pool:stop(Pool).
+diff --git a/src/couch_replicator/test/03-replication-compact.t b/src/couch_replicator/test/03-replication-compact.t
+deleted file mode 100755
+index 7c4d38c..0000000
+--- a/src/couch_replicator/test/03-replication-compact.t
++++ /dev/null
+@@ -1,488 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-% Verify that compacting databases that are being used as the source or
+-% target of a replication doesn't affect the replication and that the
+-% replication doesn't hold their reference counters forever.
+-
+--define(b2l(B), binary_to_list(B)).
+-
+--record(user_ctx, {
+-    name = null,
+-    roles = [],
+-    handler
+-}).
+-
+--record(db, {
+-    main_pid = nil,
+-    update_pid = nil,
+-    compactor_pid = nil,
+-    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+-    fd,
+-    updater_fd,
+-    fd_ref_counter,
+-    header = nil,
+-    committed_update_seq,
+-    fulldocinfo_by_id_btree,
+-    docinfo_by_seq_btree,
+-    local_docs_btree,
+-    update_seq,
+-    name,
+-    filepath,
+-    validate_doc_funs = [],
+-    security = [],
+-    security_ptr = nil,
+-    user_ctx = #user_ctx{},
+-    waiting_delayed_commit = nil,
+-    revs_limit = 1000,
+-    fsync_options = [],
+-    options = [],
+-    compression,
+-    before_doc_update,
+-    after_doc_read
+-}).
+-
+--record(rep, {
+-    id,
+-    source,
+-    target,
+-    options,
+-    user_ctx,
+-    doc_id
+-}).
+-
+-
+-source_db_name() -> <<"couch_test_rep_db_a">>.
+-target_db_name() -> <<"couch_test_rep_db_b">>.
+-
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(376),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    ok.
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    ibrowse:start(),
+-
+-    Pairs = [
+-        {source_db_name(), target_db_name()},
+-        {{remote, source_db_name()}, target_db_name()},
+-        {source_db_name(), {remote, target_db_name()}},
+-        {{remote, source_db_name()}, {remote, (target_db_name())}}
+-    ],
+-
+-    lists:foreach(
+-        fun({Source, Target}) ->
+-            {ok, SourceDb} = create_db(source_db_name()),
+-            etap:is(couch_db:is_idle(SourceDb), true,
+-                "Source database is idle before starting replication"),
+-
+-            {ok, TargetDb} = create_db(target_db_name()),
+-            etap:is(couch_db:is_idle(TargetDb), true,
+-                "Target database is idle before starting replication"),
+-
+-            {ok, RepPid, RepId} = replicate(Source, Target),
+-            check_active_tasks(RepPid, RepId, Source, Target),
+-            {ok, DocsWritten} = populate_and_compact_test(
+-                RepPid, SourceDb, TargetDb),
+-
+-            wait_target_in_sync(DocsWritten, TargetDb),
+-            check_active_tasks(RepPid, RepId, Source, Target),
+-            cancel_replication(RepId, RepPid),
+-            compare_dbs(SourceDb, TargetDb),
+-
+-            delete_db(SourceDb),
+-            delete_db(TargetDb),
+-            couch_server_sup:stop(),
+-            ok = timer:sleep(1000),
+-            couch_server_sup:start_link(test_util:config_files())
+-        end,
+-        Pairs),
+-
+-    couch_server_sup:stop(),
+-    ok.
+-
+-
+-populate_and_compact_test(RepPid, SourceDb0, TargetDb0) ->
+-    etap:is(is_process_alive(RepPid), true, "Replication process is alive"),
+-    check_db_alive("source", SourceDb0),
+-    check_db_alive("target", TargetDb0),
+-
+-    Writer = spawn_writer(SourceDb0),
+-
+-    lists:foldl(
+-        fun(_, {SourceDb, TargetDb, DocCount}) ->
+-            pause_writer(Writer),
+-
+-            compact_db("source", SourceDb),
+-            etap:is(is_process_alive(RepPid), true,
+-                "Replication process is alive after source database compaction"),
+-            check_db_alive("source", SourceDb),
+-            check_ref_counter("source", SourceDb),
+-
+-            compact_db("target", TargetDb),
+-            etap:is(is_process_alive(RepPid), true,
+-                "Replication process is alive after target database compaction"),
+-            check_db_alive("target", TargetDb),
+-            check_ref_counter("target", TargetDb),
+-
+-            {ok, SourceDb2} = reopen_db(SourceDb),
+-            {ok, TargetDb2} = reopen_db(TargetDb),
+-
+-            resume_writer(Writer),
+-            wait_writer(Writer, DocCount),
+-
+-            compact_db("source", SourceDb2),
+-            etap:is(is_process_alive(RepPid), true,
+-                "Replication process is alive after source database compaction"),
+-            check_db_alive("source", SourceDb2),
+-            pause_writer(Writer),
+-            check_ref_counter("source", SourceDb2),
+-            resume_writer(Writer),
+-
+-            compact_db("target", TargetDb2),
+-            etap:is(is_process_alive(RepPid), true,
+-                "Replication process is alive after target database compaction"),
+-            check_db_alive("target", TargetDb2),
+-            pause_writer(Writer),
+-            check_ref_counter("target", TargetDb2),
+-            resume_writer(Writer),
+-
+-            {ok, SourceDb3} = reopen_db(SourceDb2),
+-            {ok, TargetDb3} = reopen_db(TargetDb2),
+-            {SourceDb3, TargetDb3, DocCount + 50}
+-        end,
+-        {SourceDb0, TargetDb0, 50}, lists:seq(1, 5)),
+-
+-    DocsWritten = stop_writer(Writer),
+-    {ok, DocsWritten}.
+-
+-
+-check_db_alive(Type, #db{main_pid = Pid}) ->
+-    etap:is(is_process_alive(Pid), true,
+-        "Local " ++ Type ++ " database main pid is alive").
+-
+-
+-compact_db(Type, #db{name = Name}) ->
+-    {ok, Db} = couch_db:open_int(Name, []),
+-    {ok, CompactPid} = couch_db:start_compact(Db),
+-    MonRef = erlang:monitor(process, CompactPid),
+-    receive
+-    {'DOWN', MonRef, process, CompactPid, normal} ->
+-        ok;
+-    {'DOWN', MonRef, process, CompactPid, Reason} ->
+-        etap:bail("Error compacting " ++ Type ++ " database " ++ ?b2l(Name) ++
+-            ": " ++ couch_util:to_list(Reason))
+-    after 30000 ->
+-        etap:bail("Compaction for " ++ Type ++ " database " ++ ?b2l(Name) ++
+-            " didn't finish")
+-    end,
+-    ok = couch_db:close(Db).
+-
+-
+-check_ref_counter(Type, #db{name = Name, fd_ref_counter = OldRefCounter}) ->
+-    MonRef = erlang:monitor(process, OldRefCounter),
+-    receive
+-    {'DOWN', MonRef, process, OldRefCounter, _} ->
+-        etap:diag("Old " ++ Type ++ " database ref counter terminated")
+-    after 30000 ->
+-        etap:bail("Old " ++ Type ++ " database ref counter didn't terminate")
+-    end,
+-    {ok, #db{fd_ref_counter = NewRefCounter} = Db} = couch_db:open_int(Name, []),
+-    ok = couch_db:close(Db),
+-    etap:isnt(
+-        NewRefCounter, OldRefCounter, Type ++ " database has new ref counter").
+-
+-
+-reopen_db(#db{name = Name}) ->
+-    {ok, Db} = couch_db:open_int(Name, []),
+-    ok = couch_db:close(Db),
+-    {ok, Db}.
+-
+-
+-wait_target_in_sync(DocCount, #db{name = TargetName}) ->
+-    wait_target_in_sync_loop(DocCount, TargetName, 300).
+-
+-
+-wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
+-    etap:bail("Could not get source and target databases in sync");
+-wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
+-    {ok, Target} = couch_db:open_int(TargetName, []),
+-    {ok, TargetInfo} = couch_db:get_db_info(Target),
+-    ok = couch_db:close(Target),
+-    TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
+-    case TargetDocCount == DocCount of
+-    true ->
+-        etap:diag("Source and target databases are in sync");
+-    false ->
+-        ok = timer:sleep(100),
+-        wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
+-    end.
+-
+-
+-compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
+-    {ok, SourceDb} = couch_db:open_int(SourceName, []),
+-    {ok, TargetDb} = couch_db:open_int(TargetName, []),
+-    Fun = fun(FullDocInfo, _, Acc) ->
+-        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+-        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+-        DocId = couch_util:get_value(<<"_id">>, Props),
+-        DocTarget = case couch_db:open_doc(TargetDb, DocId) of
+-        {ok, DocT} ->
+-            DocT;
+-        Error ->
+-            etap:bail("Error opening document '" ++ ?b2l(DocId) ++
+-                "' from target: " ++ couch_util:to_list(Error))
+-        end,
+-        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+-        case DocTargetJson of
+-        DocJson ->
+-            ok;
+-        _ ->
+-            etap:bail("Content from document '" ++ ?b2l(DocId) ++
+-                "' differs in target database")
+-        end,
+-        {ok, Acc}
+-    end,
+-    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+-    etap:diag("Target database has the same documents as the source database"),
+-    ok = couch_db:close(SourceDb),
+-    ok = couch_db:close(TargetDb).
+-
+-
+-check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
+-    Source = case Src of
+-    {remote, NameSrc} ->
+-        <<(db_url(NameSrc))/binary, $/>>;
+-    _ ->
+-        Src
+-    end,
+-    Target = case Tgt of
+-    {remote, NameTgt} ->
+-        <<(db_url(NameTgt))/binary, $/>>;
+-    _ ->
+-        Tgt
+-    end,
+-    FullRepId = list_to_binary(BaseId ++ Ext),
+-    Pid = list_to_binary(pid_to_list(RepPid)),
+-    [RepTask] = couch_task_status:all(),
+-    etap:is(couch_util:get_value(pid, RepTask), Pid,
+-        "_active_tasks entry has correct pid property"),
+-    etap:is(couch_util:get_value(replication_id, RepTask), FullRepId,
+-        "_active_tasks entry has right replication id"),
+-    etap:is(couch_util:get_value(continuous, RepTask), true,
+-        "_active_tasks entry has continuous property set to true"),
+-    etap:is(couch_util:get_value(source, RepTask), Source,
+-        "_active_tasks entry has correct source property"),
+-    etap:is(couch_util:get_value(target, RepTask), Target,
+-        "_active_tasks entry has correct target property"),
+-    etap:is(is_integer(couch_util:get_value(docs_read, RepTask)), true,
+-        "_active_tasks entry has integer docs_read property"),
+-    etap:is(is_integer(couch_util:get_value(docs_written, RepTask)), true,
+-        "_active_tasks entry has integer docs_written property"),
+-    etap:is(is_integer(couch_util:get_value(doc_write_failures, RepTask)), true,
+-        "_active_tasks entry has integer doc_write_failures property"),
+-    etap:is(is_integer(couch_util:get_value(revisions_checked, RepTask)), true,
+-        "_active_tasks entry has integer revisions_checked property"),
+-    etap:is(is_integer(couch_util:get_value(missing_revisions_found, RepTask)), true,
+-        "_active_tasks entry has integer missing_revisions_found property"),
+-    etap:is(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask)), true,
+-        "_active_tasks entry has integer checkpointed_source_seq property"),
+-    etap:is(is_integer(couch_util:get_value(source_seq, RepTask)), true,
+-        "_active_tasks entry has integer source_seq property"),
+-    Progress = couch_util:get_value(progress, RepTask),
+-    etap:is(is_integer(Progress), true,
+-        "_active_tasks entry has an integer progress property"),
+-    etap:is(Progress =< 100, true, "Progress is not greater than 100%").
+-
+-
+-wait_writer(Pid, NumDocs) ->
+-    case get_writer_num_docs_written(Pid) of
+-    N when N >= NumDocs ->
+-        ok;
+-    _ ->
+-        wait_writer(Pid, NumDocs)
+-    end.
+-
+-
+-spawn_writer(Db) ->
+-    Parent = self(),
+-    Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
+-    etap:diag("Started source database writer"),
+-    Pid.
+-
+-
+-pause_writer(Pid) ->
+-    Ref = make_ref(),
+-    Pid ! {pause, Ref},
+-    receive
+-    {paused, Ref} ->
+-        ok
+-    after 30000 ->
+-        etap:bail("Failed to pause source database writer")
+-    end.
+-
+-
+-resume_writer(Pid) ->
+-    Ref = make_ref(),
+-    Pid ! {continue, Ref},
+-    receive
+-    {ok, Ref} ->
+-        ok
+-    after 30000 ->
+-        etap:bail("Failed to unpause source database writer")
+-    end.
+-
+-
+-get_writer_num_docs_written(Pid) ->
+-    Ref = make_ref(),
+-    Pid ! {get_count, Ref},
+-    receive
+-    {count, Ref, Count} ->
+-        Count
+-    after 30000 ->
+-        etap:bail("Timeout getting number of documents written from "
+-            "source database writer")
+-    end.
+-
+-
+-stop_writer(Pid) ->
+-    Ref = make_ref(),
+-    Pid ! {stop, Ref},
+-    receive
+-    {stopped, Ref, DocsWritten} ->
+-        MonRef = erlang:monitor(process, Pid),
+-        receive
+-        {'DOWN', MonRef, process, Pid, _Reason} ->
+-            etap:diag("Stopped source database writer"),
+-            DocsWritten
+-        after 30000 ->
+-            etap:bail("Timeout stopping source database writer")
+-        end
+-    after 30000 ->
+-        etap:bail("Timeout stopping source database writer")
+-    end.
+-
+-
+-writer_loop(#db{name = DbName}, Parent, Counter) ->
+-    maybe_pause(Parent, Counter),
+-    Doc = couch_doc:from_json_obj({[
+-        {<<"_id">>, list_to_binary(integer_to_list(Counter + 1))},
+-        {<<"value">>, Counter + 1},
+-        {<<"_attachments">>, {[
+-            {<<"icon1.png">>, {[
+-                {<<"data">>, base64:encode(att_data())},
+-                {<<"content_type">>, <<"image/png">>}
+-            ]}},
+-            {<<"icon2.png">>, {[
+-                {<<"data">>, base64:encode(iolist_to_binary(
+-                    [att_data(), att_data()]))},
+-                {<<"content_type">>, <<"image/png">>}
+-            ]}}
+-        ]}}
+-    ]}),
+-    maybe_pause(Parent, Counter),
+-    {ok, Db} = couch_db:open_int(DbName, []),
+-    {ok, _} = couch_db:update_doc(Db, Doc, []),
+-    ok = couch_db:close(Db),
+-    receive
+-    {get_count, Ref} ->
+-        Parent ! {count, Ref, Counter + 1},
+-        writer_loop(Db, Parent, Counter + 1);
+-    {stop, Ref} ->
+-        Parent ! {stopped, Ref, Counter + 1}
+-    after 0 ->
+-        ok = timer:sleep(500),
+-        writer_loop(Db, Parent, Counter + 1)
+-    end.
+-
+-
+-maybe_pause(Parent, Counter) ->
+-    receive
+-    {get_count, Ref} ->
+-        Parent ! {count, Ref, Counter};
+-    {pause, Ref} ->
+-        Parent ! {paused, Ref},
+-        receive {continue, Ref2} -> Parent ! {ok, Ref2} end
+-    after 0 ->
+-        ok
+-    end.
+-
+-
+-db_url(DbName) ->
+-    iolist_to_binary([
+-        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+-        "/", DbName
+-    ]).
+-
+-
+-create_db(DbName) ->
+-    {ok, Db} = couch_db:create(
+-        DbName,
+-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
+-    couch_db:close(Db),
+-    {ok, Db}.
+-
+-
+-delete_db(#db{name = DbName, main_pid = Pid}) ->
+-    ok = couch_server:delete(
+-        DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
+-    MonRef = erlang:monitor(process, Pid),
+-    receive
+-    {'DOWN', MonRef, process, Pid, _Reason} ->
+-        ok
+-    after 30000 ->
+-        etap:bail("Timeout deleting database")
+-    end.
+-
+-
+-replicate({remote, Db}, Target) ->
+-    replicate(db_url(Db), Target);
+-
+-replicate(Source, {remote, Db}) ->
+-    replicate(Source, db_url(Db));
+-
+-replicate(Source, Target) ->
+-    RepObject = {[
+-        {<<"source">>, Source},
+-        {<<"target">>, Target},
+-        {<<"continuous">>, true}
+-    ]},
+-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+-    {ok, Pid} = couch_replicator:async_replicate(Rep),
+-    {ok, Pid, Rep#rep.id}.
+-
+-
+-cancel_replication(RepId, RepPid) ->
+-    {ok, _} = couch_replicator:cancel_replication(RepId),
+-    etap:is(is_process_alive(RepPid), false,
+-        "Replication process is no longer alive after cancel").
+-
+-
+-att_data() ->
+-    {ok, Data} = file:read_file(
+-        test_util:source_file("share/www/image/logo.png")),
+-    Data.
+diff --git a/src/couch_replicator/test/04-replication-large-atts.t b/src/couch_replicator/test/04-replication-large-atts.t
+deleted file mode 100755
+index a7063c7..0000000
+--- a/src/couch_replicator/test/04-replication-large-atts.t
++++ /dev/null
+@@ -1,267 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-% Test replication of large attachments. Verify that both source and
+-% target have the same attachment data and metadata.
+-
+--define(b2l(Bin), binary_to_list(Bin)).
+-
+--record(user_ctx, {
+-    name = null,
+-    roles = [],
+-    handler
+-}).
+-
+--record(doc, {
+-    id = <<"">>,
+-    revs = {0, []},
+-    body = {[]},
+-    atts = [],
+-    deleted = false,
+-    meta = []
+-}).
+-
+--record(att, {
+-    name,
+-    type,
+-    att_len,
+-    disk_len,
+-    md5= <<>>,
+-    revpos=0,
+-    data,
+-    encoding=identity
+-}).
+-
+-
+-source_db_name() -> <<"couch_test_rep_db_a">>.
+-target_db_name() -> <<"couch_test_rep_db_b">>.
+-
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(1192),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    ok.
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    application:start(ibrowse),
+-    application:start(crypto),
+-    couch_config:set("attachments", "compressible_types", "text/*", false),
+-
+-    Pairs = [
+-        {source_db_name(), target_db_name()},
+-        {{remote, source_db_name()}, target_db_name()},
+-        {source_db_name(), {remote, target_db_name()}},
+-        {{remote, source_db_name()}, {remote, (target_db_name())}}
+-    ],
+-
+-    {ok, SourceDb} = create_db(source_db_name()),
+-    etap:diag("Populating source database"),
+-    populate_db(SourceDb, 11),
+-    ok = couch_db:close(SourceDb),
+-
+-    lists:foreach(
+-        fun({Source, Target}) ->
+-            etap:diag("Creating target database"),
+-            {ok, TargetDb} = create_db(target_db_name()),
+-
+-            ok = couch_db:close(TargetDb),
+-            etap:diag("Triggering replication"),
+-            replicate(Source, Target),
+-            etap:diag("Replication finished, comparing source and target databases"),
+-            compare_dbs(SourceDb, TargetDb),
+-
+-            etap:diag("Deleting target database"),
+-            delete_db(TargetDb),
+-            ok = timer:sleep(1000)
+-        end,
+-        Pairs),
+-
+-    delete_db(SourceDb),
+-    couch_server_sup:stop(),
+-    ok.
+-
+-
+-populate_db(Db, DocCount) ->
+-    Docs = lists:foldl(
+-        fun(DocIdCounter, Acc) ->
+-            Doc = #doc{
+-                id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
+-                body = {[]},
+-                atts = [
+-                    att(<<"att1">>, 2 * 1024 * 1024, <<"text/plain">>),
+-                    att(<<"att2">>, round(6.6 * 1024 * 1024), <<"app/binary">>)
+-                ]
+-            },
+-            [Doc | Acc]
+-        end,
+-        [], lists:seq(1, DocCount)),
+-    {ok, _} = couch_db:update_docs(Db, Docs, []).
+-
+-
+-att(Name, Size, Type) ->
+-    #att{
+-        name = Name,
+-        type = Type,
+-        att_len = Size,
+-        data = fun(Count) -> crypto:rand_bytes(Count) end
+-    }.
+-
+-
+-compare_dbs(Source, Target) ->
+-    {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
+-    {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
+-
+-    Fun = fun(FullDocInfo, _, Acc) ->
+-        {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
+-        Id = DocSource#doc.id,
+-
+-        etap:diag("Verifying document " ++ ?b2l(Id)),
+-
+-        {ok, DocTarget} = couch_db:open_doc(TargetDb, Id),
+-        etap:is(DocTarget#doc.body, DocSource#doc.body,
+-            "Same body in source and target databases"),
+-
+-        #doc{atts = SourceAtts} = DocSource,
+-        #doc{atts = TargetAtts} = DocTarget,
+-        etap:is(
+-            lists:sort([N || #att{name = N} <- SourceAtts]),
+-            lists:sort([N || #att{name = N} <- TargetAtts]),
+-            "Document has same number (and names) of attachments in "
+-            "source and target databases"),
+-
+-        lists:foreach(
+-            fun(#att{name = AttName} = Att) ->
+-                etap:diag("Verifying attachment " ++ ?b2l(AttName)),
+-
+-                {ok, AttTarget} = find_att(TargetAtts, AttName),
+-                SourceMd5 = att_md5(Att),
+-                TargetMd5 = att_md5(AttTarget),
+-                case AttName of
+-                <<"att1">> ->
+-                    etap:is(Att#att.encoding, gzip,
+-                        "Attachment is gzip encoded in source database"),
+-                    etap:is(AttTarget#att.encoding, gzip,
+-                        "Attachment is gzip encoded in target database"),
+-                    DecSourceMd5 = att_decoded_md5(Att),
+-                    DecTargetMd5 = att_decoded_md5(AttTarget),
+-                    etap:is(DecTargetMd5, DecSourceMd5,
+-                        "Same identity content in source and target databases");
+-                _ ->
+-                    etap:is(Att#att.encoding, identity,
+-                        "Attachment is not encoded in source database"),
+-                    etap:is(AttTarget#att.encoding, identity,
+-                        "Attachment is not encoded in target database")
+-                end,
+-                etap:is(TargetMd5, SourceMd5,
+-                    "Same content in source and target databases"),
+-                etap:is(is_integer(Att#att.disk_len), true,
+-                    "#att.disk_len is an integer in source database"),
+-                etap:is(is_integer(Att#att.att_len), true,
+-                    "#att.att_len is an integer in source database"),
+-                etap:is(is_integer(AttTarget#att.disk_len), true,
+-                    "#att.disk_len is an integer in target database"),
+-                etap:is(is_integer(AttTarget#att.att_len), true,
+-                    "#att.att_len is an integer in target database"),
+-                etap:is(Att#att.disk_len, AttTarget#att.disk_len,
+-                    "Same identity length in source and target databases"),
+-                etap:is(Att#att.att_len, AttTarget#att.att_len,
+-                    "Same encoded length in source and target databases"),
+-                etap:is(Att#att.type, AttTarget#att.type,
+-                    "Same type in source and target databases"),
+-                etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
+-                etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
+-            end,
+-            SourceAtts),
+-
+-        {ok, Acc}
+-    end,
+-
+-    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+-    ok = couch_db:close(SourceDb),
+-    ok = couch_db:close(TargetDb).
+-
+-
+-find_att([], _Name) ->
+-    nil;
+-find_att([#att{name = Name} = Att | _], Name) ->
+-    {ok, Att};
+-find_att([_ | Rest], Name) ->
+-    find_att(Rest, Name).
+-
+-
+-att_md5(Att) ->
+-    Md50 = couch_doc:att_foldl(
+-        Att,
+-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+-        couch_util:md5_init()),
+-    couch_util:md5_final(Md50).
+-
+-att_decoded_md5(Att) ->
+-    Md50 = couch_doc:att_foldl_decode(
+-        Att,
+-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+-        couch_util:md5_init()),
+-    couch_util:md5_final(Md50).
+-
+-
+-db_url(DbName) ->
+-    iolist_to_binary([
+-        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+-        "/", DbName
+-    ]).
+-
+-
+-create_db(DbName) ->
+-    couch_db:create(
+-        DbName,
+-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+-
+-
+-delete_db(Db) ->
+-    ok = couch_server:delete(
+-        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+-
+-
+-replicate({remote, Db}, Target) ->
+-    replicate(db_url(Db), Target);
+-
+-replicate(Source, {remote, Db}) ->
+-    replicate(Source, db_url(Db));
+-
+-replicate(Source, Target) ->
+-    RepObject = {[
+-        {<<"source">>, Source},
+-        {<<"target">>, Target}
+-    ]},
+-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+-    {ok, Pid} = couch_replicator:async_replicate(Rep),
+-    MonRef = erlang:monitor(process, Pid),
+-    receive
+-    {'DOWN', MonRef, process, Pid, Reason} ->
+-        etap:is(Reason, normal, "Replication finished successfully")
+-    after 300000 ->
+-        etap:bail("Timeout waiting for replication to finish")
+-    end.
+diff --git a/src/couch_replicator/test/05-replication-many-leaves.t b/src/couch_replicator/test/05-replication-many-leaves.t
+deleted file mode 100755
+index 212ee99..0000000
+--- a/src/couch_replicator/test/05-replication-many-leaves.t
++++ /dev/null
+@@ -1,294 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-% Test replication of documents with many leaf revisions.
+-% Motivated by COUCHDB-1340 and other similar issues where a document
+-% GET with a too long ?open_revs revision list doesn't work due to
+-% maximum web server limits for the HTTP request path.
+-
+--record(user_ctx, {
+-    name = null,
+-    roles = [],
+-    handler
+-}).
+-
+--record(doc, {
+-    id = <<"">>,
+-    revs = {0, []},
+-    body = {[]},
+-    atts = [],
+-    deleted = false,
+-    meta = []
+-}).
+-
+--record(att, {
+-    name,
+-    type,
+-    att_len,
+-    disk_len,
+-    md5= <<>>,
+-    revpos=0,
+-    data,
+-    encoding=identity
+-}).
+-
+--define(b2l(B), binary_to_list(B)).
+--define(l2b(L), list_to_binary(L)).
+--define(i2l(I), integer_to_list(I)).
+-
+-
+-source_db_name() -> <<"couch_test_rep_db_a">>.
+-target_db_name() -> <<"couch_test_rep_db_b">>.
+-
+-doc_ids() ->
+-    [<<"doc1">>, <<"doc2">>, <<"doc3">>].
+-
+-doc_num_conflicts(<<"doc1">>) -> 10;
+-doc_num_conflicts(<<"doc2">>) -> 100;
+-% a number > MaxURLlength (7000) / length(DocRevisionString)
+-doc_num_conflicts(<<"doc3">>) -> 210.
+-
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(56),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    ok.
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    ibrowse:start(),
+-    crypto:start(),
+-    couch_config:set("replicator", "connection_timeout", "90000", false),
+-
+-    Pairs = [
+-        {source_db_name(), target_db_name()},
+-        {{remote, source_db_name()}, target_db_name()},
+-        {source_db_name(), {remote, target_db_name()}},
+-        {{remote, source_db_name()}, {remote, (target_db_name())}}
+-    ],
+-
+-    lists:foreach(
+-        fun({Source, Target}) ->
+-            {ok, SourceDb} = create_db(source_db_name()),
+-            etap:diag("Populating source database"),
+-            {ok, DocRevs} = populate_db(SourceDb),
+-            ok = couch_db:close(SourceDb),
+-            etap:diag("Creating target database"),
+-            {ok, TargetDb} = create_db(target_db_name()),
+-
+-            ok = couch_db:close(TargetDb),
+-            etap:diag("Triggering replication"),
+-            replicate(Source, Target),
+-            etap:diag("Replication finished, comparing source and target databases"),
+-            {ok, SourceDb2} = couch_db:open_int(source_db_name(), []),
+-            {ok, TargetDb2} = couch_db:open_int(target_db_name(), []),
+-            verify_target(SourceDb2, TargetDb2, DocRevs),
+-            ok = couch_db:close(SourceDb2),
+-            ok = couch_db:close(TargetDb2),
+-
+-            {ok, SourceDb3} = couch_db:open_int(source_db_name(), []),
+-            {ok, DocRevs2} = add_attachments(SourceDb3, DocRevs, 2),
+-            ok = couch_db:close(SourceDb3),
+-            etap:diag("Triggering replication again"),
+-            replicate(Source, Target),
+-            etap:diag("Replication finished, comparing source and target databases"),
+-            {ok, SourceDb4} = couch_db:open_int(source_db_name(), []),
+-            {ok, TargetDb4} = couch_db:open_int(target_db_name(), []),
+-            verify_target(SourceDb4, TargetDb4, DocRevs2),
+-            ok = couch_db:close(SourceDb4),
+-            ok = couch_db:close(TargetDb4),
+-
+-            etap:diag("Deleting source and target databases"),
+-            delete_db(TargetDb),
+-            delete_db(SourceDb),
+-            ok = timer:sleep(1000)
+-        end,
+-        Pairs),
+-
+-    couch_server_sup:stop(),
+-    ok.
+-
+-
+-populate_db(Db) ->
+-    DocRevsDict = lists:foldl(
+-        fun(DocId, Acc) ->
+-            Value = <<"0">>,
+-            Doc = #doc{
+-                id = DocId,
+-                body = {[ {<<"value">>, Value} ]}
+-            },
+-            {ok, Rev} = couch_db:update_doc(Db, Doc, []),
+-            {ok, DocRevs} = add_doc_siblings(Db, DocId, doc_num_conflicts(DocId)),
+-            dict:store(DocId, [Rev | DocRevs], Acc)
+-        end,
+-        dict:new(), doc_ids()),
+-    {ok, dict:to_list(DocRevsDict)}.
+-
+-
+-add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
+-    add_doc_siblings(Db, DocId, NumLeaves, [], []).
+-
+-
+-add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
+-    {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
+-    {ok, AccRevs};
+-
+-add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
+-    Value = list_to_binary(integer_to_list(NumLeaves)),
+-    Rev = couch_util:md5(Value),
+-    Doc = #doc{
+-        id = DocId,
+-        revs = {1, [Rev]},
+-        body = {[ {<<"value">>, Value} ]}
+-    },
+-    add_doc_siblings(Db, DocId, NumLeaves - 1, [Doc | AccDocs], [{1, Rev} | AccRevs]).
+-
+-
+-verify_target(_SourceDb, _TargetDb, []) ->
+-    ok;
+-
+-verify_target(SourceDb, TargetDb, [{DocId, RevList} | Rest]) ->
+-    {ok, Lookups} = couch_db:open_doc_revs(
+-        TargetDb,
+-        DocId,
+-        RevList,
+-        [conflicts, deleted_conflicts]),
+-    Docs = [Doc || {ok, Doc} <- Lookups],
+-    {ok, SourceLookups} = couch_db:open_doc_revs(
+-        SourceDb,
+-        DocId,
+-        RevList,
+-        [conflicts, deleted_conflicts]),
+-    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
+-    Total = doc_num_conflicts(DocId) + 1,
+-    etap:is(
+-        length(Docs),
+-        Total,
+-        "Target has " ++ ?i2l(Total) ++ " leaf revisions of document " ++ ?b2l(DocId)),
+-    etap:diag("Verifying all revisions of document " ++ ?b2l(DocId)),
+-    lists:foreach(
+-        fun({#doc{id = Id, revs = Revs} = TgtDoc, #doc{id = Id, revs = Revs} = SrcDoc}) ->
+-            SourceJson = couch_doc:to_json_obj(SrcDoc, [attachments]),
+-            TargetJson = couch_doc:to_json_obj(TgtDoc, [attachments]),
+-            case TargetJson of
+-            SourceJson ->
+-                ok;
+-            _ ->
+-                {Pos, [Rev | _]} = Revs,
+-                etap:bail("Wrong value for revision " ++
+-                    ?b2l(couch_doc:rev_to_str({Pos, Rev})) ++
+-                    " of document " ++ ?b2l(DocId))
+-            end
+-        end,
+-        lists:zip(Docs, SourceDocs)),
+-    verify_target(SourceDb, TargetDb, Rest).
+-
+-
+-add_attachments(Source, DocIdRevs, NumAtts) ->
+-    add_attachments(Source, DocIdRevs, NumAtts, []).
+-
+-add_attachments(_SourceDb, [], _NumAtts, Acc) ->
+-    {ok, Acc};
+-
+-add_attachments(SourceDb, [{DocId, RevList} | Rest], NumAtts, IdRevsAcc) ->
+-    {ok, SourceLookups} = couch_db:open_doc_revs(
+-        SourceDb,
+-        DocId,
+-        RevList,
+-        []),
+-    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
+-    Total = doc_num_conflicts(DocId) + 1,
+-    etap:is(
+-        length(SourceDocs),
+-        Total,
+-        "Source still has " ++ ?i2l(Total) ++
+-            " leaf revisions of document " ++ ?b2l(DocId)),
+-    etap:diag("Adding " ++ ?i2l(NumAtts) ++
+-        " attachments to each revision of the document " ++ ?b2l(DocId)),
+-    NewDocs = lists:foldl(
+-        fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
+-            NewAtts = lists:foldl(
+-                fun(I, AttAcc) ->
+-                    AttData = crypto:rand_bytes(100),
+-                    NewAtt = #att{
+-                        name = iolist_to_binary(
+-                            ["att_", ?i2l(I), "_", couch_doc:rev_to_str({Pos, Rev})]),
+-                        type = <<"application/foobar">>,
+-                        att_len = byte_size(AttData),
+-                        data = AttData
+-                    },
+-                    [NewAtt | AttAcc]
+-                end,
+-                [], lists:seq(1, NumAtts)),
+-            [Doc#doc{atts = Atts ++ NewAtts} | Acc]
+-        end,
+-        [], SourceDocs),
+-    {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
+-    NewRevs = [R || {ok, R} <- UpdateResults],
+-    etap:is(
+-        length(NewRevs),
+-        length(NewDocs),
+-        "Document revisions updated with " ++ ?i2l(NumAtts) ++ " attachments"),
+-    add_attachments(SourceDb, Rest, NumAtts, [{DocId, NewRevs} | IdRevsAcc]).
+-
+-
+-db_url(DbName) ->
+-    iolist_to_binary([
+-        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+-        "/", DbName
+-    ]).
+-
+-
+-create_db(DbName) ->
+-    couch_db:create(
+-        DbName,
+-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+-
+-
+-delete_db(Db) ->
+-    ok = couch_server:delete(
+-        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+-
+-
+-replicate({remote, Db}, Target) ->
+-    replicate(db_url(Db), Target);
+-
+-replicate(Source, {remote, Db}) ->
+-    replicate(Source, db_url(Db));
+-
+-replicate(Source, Target) ->
+-    RepObject = {[
+-        {<<"source">>, Source},
+-        {<<"target">>, Target}
+-    ]},
+-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+-    {ok, Pid} = couch_replicator:async_replicate(Rep),
+-    MonRef = erlang:monitor(process, Pid),
+-    receive
+-    {'DOWN', MonRef, process, Pid, Reason} ->
+-        etap:is(Reason, normal, "Replication finished successfully")
+-    after 900000 ->
+-        etap:bail("Timeout waiting for replication to finish")
+-    end.
+diff --git a/src/couch_replicator/test/06-doc-missing-stubs.t b/src/couch_replicator/test/06-doc-missing-stubs.t
+deleted file mode 100755
+index e17efc9..0000000
+--- a/src/couch_replicator/test/06-doc-missing-stubs.t
++++ /dev/null
+@@ -1,304 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-% Test replication of documents with many leaf revisions.
+-% Motivated by COUCHDB-1340 and other similar issues where a document
+-% GET with a too long ?open_revs revision list doesn't work due to
+-% maximum web server limits for the HTTP request path.
+-
+--record(user_ctx, {
+-    name = null,
+-    roles = [],
+-    handler
+-}).
+-
+--record(doc, {
+-    id = <<"">>,
+-    revs = {0, []},
+-    body = {[]},
+-    atts = [],
+-    deleted = false,
+-    meta = []
+-}).
+-
+--record(att, {
+-    name,
+-    type,
+-    att_len,
+-    disk_len,
+-    md5= <<>>,
+-    revpos=0,
+-    data,
+-    encoding=identity
+-}).
+-
+--define(b2l(B), binary_to_list(B)).
+-
+-source_db_name() -> <<"couch_test_rep_db_a">>.
+-target_db_name() -> <<"couch_test_rep_db_b">>.
+-
+-target_revs_limit() -> 3.
+-
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(128),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    ok.
+-
+-
+-% Test motivated by COUCHDB-1365.
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    ibrowse:start(),
+-
+-    Pairs = [
+-        {source_db_name(), target_db_name()},
+-        {{remote, source_db_name()}, target_db_name()},
+-        {source_db_name(), {remote, target_db_name()}},
+-        {{remote, source_db_name()}, {remote, (target_db_name())}}
+-    ],
+-
+-    lists:foreach(
+-        fun({Source, Target}) ->
+-            {ok, SourceDb} = create_db(source_db_name()),
+-            etap:diag("Populating source database"),
+-            populate_db(SourceDb),
+-            ok = couch_db:close(SourceDb),
+-
+-            etap:diag("Creating target database"),
+-            {ok, TargetDb} = create_db(target_db_name()),
+-            ok = couch_db:set_revs_limit(TargetDb, target_revs_limit()),
+-            ok = couch_db:close(TargetDb),
+-
+-            etap:diag("Triggering replication"),
+-            replicate(Source, Target),
+-            etap:diag("Replication finished, comparing source and target databases"),
+-            compare_dbs(SourceDb, TargetDb),
+-
+-            etap:diag("Updating source database docs"),
+-            update_db_docs(couch_db:name(SourceDb), target_revs_limit() + 2),
+-
+-            etap:diag("Triggering replication again"),
+-            replicate(Source, Target),
+-            etap:diag("Replication finished, comparing source and target databases"),
+-            compare_dbs(SourceDb, TargetDb),
+-
+-            etap:diag("Deleting databases"),
+-            delete_db(TargetDb),
+-            delete_db(SourceDb),
+-            ok = timer:sleep(1000)
+-        end,
+-        Pairs),
+-
+-    couch_server_sup:stop(),
+-    ok.
+-
+-
+-populate_db(Db) ->
+-    AttData = crypto:rand_bytes(6000),
+-    Doc1 = #doc{
+-        id = <<"doc1">>,
+-        atts = [
+-            #att{
+-                name = <<"doc1_att1">>,
+-                type = <<"application/foobar">>,
+-                att_len = byte_size(AttData),
+-                data = AttData
+-            }
+-        ]
+-    },
+-    {ok, _} = couch_db:update_doc(Db, Doc1, []).
+-
+-
+-update_db_docs(DbName, Times) ->
+-    {ok, Db} = couch_db:open_int(DbName, []),
+-    {ok, _, _} = couch_db:enum_docs(
+-        Db,
+-        fun(FDI, _, Acc) -> db_fold_fun(FDI, Acc) end,
+-        {DbName, Times},
+-        []),
+-    ok = couch_db:close(Db).
+-
+-
+-db_fold_fun(FullDocInfo, {DbName, Times}) ->
+-    {ok, Db} = couch_db:open_int(DbName, []),
+-    {ok, Doc} = couch_db:open_doc(Db, FullDocInfo),
+-    lists:foldl(
+-        fun(_, {Pos, RevId}) ->
+-            {ok, Db2} = couch_db:reopen(Db),
+-            NewDocVersion = Doc#doc{
+-                revs = {Pos, [RevId]},
+-                body = {[{<<"value">>, base64:encode(crypto:rand_bytes(100))}]}
+-            },
+-            {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []),
+-            NewRev
+-        end,
+-        {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
+-        lists:seq(1, Times)),
+-    ok = couch_db:close(Db),
+-    {ok, {DbName, Times}}.
+-
+-
+-compare_dbs(Source, Target) ->
+-    {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
+-    {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
+-
+-    Fun = fun(FullDocInfo, _, Acc) ->
+-        {ok, DocSource} = couch_db:open_doc(
+-            SourceDb, FullDocInfo, [conflicts, deleted_conflicts]),
+-        Id = DocSource#doc.id,
+-
+-        etap:diag("Verifying document " ++ ?b2l(Id)),
+-
+-        {ok, DocTarget} = couch_db:open_doc(
+-            TargetDb, Id, [conflicts, deleted_conflicts]),
+-        etap:is(DocTarget#doc.body, DocSource#doc.body,
+-            "Same body in source and target databases"),
+-
+-        etap:is(
+-            couch_doc:to_json_obj(DocTarget, []),
+-            couch_doc:to_json_obj(DocSource, []),
+-            "Same doc body in source and target databases"),
+-
+-        #doc{atts = SourceAtts} = DocSource,
+-        #doc{atts = TargetAtts} = DocTarget,
+-        etap:is(
+-            lists:sort([N || #att{name = N} <- SourceAtts]),
+-            lists:sort([N || #att{name = N} <- TargetAtts]),
+-            "Document has same number (and names) of attachments in "
+-            "source and target databases"),
+-
+-        lists:foreach(
+-            fun(#att{name = AttName} = Att) ->
+-                etap:diag("Verifying attachment " ++ ?b2l(AttName)),
+-
+-                {ok, AttTarget} = find_att(TargetAtts, AttName),
+-                SourceMd5 = att_md5(Att),
+-                TargetMd5 = att_md5(AttTarget),
+-                case AttName of
+-                <<"att1">> ->
+-                    etap:is(Att#att.encoding, gzip,
+-                        "Attachment is gzip encoded in source database"),
+-                    etap:is(AttTarget#att.encoding, gzip,
+-                        "Attachment is gzip encoded in target database"),
+-                    DecSourceMd5 = att_decoded_md5(Att),
+-                    DecTargetMd5 = att_decoded_md5(AttTarget),
+-                    etap:is(DecTargetMd5, DecSourceMd5,
+-                        "Same identity content in source and target databases");
+-                _ ->
+-                    etap:is(Att#att.encoding, identity,
+-                        "Attachment is not encoded in source database"),
+-                    etap:is(AttTarget#att.encoding, identity,
+-                        "Attachment is not encoded in target database")
+-                end,
+-                etap:is(TargetMd5, SourceMd5,
+-                    "Same content in source and target databases"),
+-                etap:is(is_integer(Att#att.disk_len), true,
+-                    "#att.disk_len is an integer in source database"),
+-                etap:is(is_integer(Att#att.att_len), true,
+-                    "#att.att_len is an integer in source database"),
+-                etap:is(is_integer(AttTarget#att.disk_len), true,
+-                    "#att.disk_len is an integer in target database"),
+-                etap:is(is_integer(AttTarget#att.att_len), true,
+-                    "#att.att_len is an integer in target database"),
+-                etap:is(Att#att.disk_len, AttTarget#att.disk_len,
+-                    "Same identity length in source and target databases"),
+-                etap:is(Att#att.att_len, AttTarget#att.att_len,
+-                    "Same encoded length in source and target databases"),
+-                etap:is(Att#att.type, AttTarget#att.type,
+-                    "Same type in source and target databases"),
+-                etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
+-                etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
+-            end,
+-            SourceAtts),
+-
+-        {ok, Acc}
+-    end,
+-
+-    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+-    ok = couch_db:close(SourceDb),
+-    ok = couch_db:close(TargetDb).
+-
+-
+-find_att([], _Name) ->
+-    nil;
+-find_att([#att{name = Name} = Att | _], Name) ->
+-    {ok, Att};
+-find_att([_ | Rest], Name) ->
+-    find_att(Rest, Name).
+-
+-
+-att_md5(Att) ->
+-    Md50 = couch_doc:att_foldl(
+-        Att,
+-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+-        couch_util:md5_init()),
+-    couch_util:md5_final(Md50).
+-
+-att_decoded_md5(Att) ->
+-    Md50 = couch_doc:att_foldl_decode(
+-        Att,
+-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+-        couch_util:md5_init()),
+-    couch_util:md5_final(Md50).
+-
+-
+-db_url(DbName) ->
+-    iolist_to_binary([
+-        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+-        "/", DbName
+-    ]).
+-
+-
+-create_db(DbName) ->
+-    couch_db:create(
+-        DbName,
+-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+-
+-
+-delete_db(Db) ->
+-    ok = couch_server:delete(
+-        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+-
+-
+-replicate({remote, Db}, Target) ->
+-    replicate(db_url(Db), Target);
+-
+-replicate(Source, {remote, Db}) ->
+-    replicate(Source, db_url(Db));
+-
+-replicate(Source, Target) ->
+-    RepObject = {[
+-        {<<"source">>, Source},
+-        {<<"target">>, Target}
+-    ]},
+-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+-    {ok, Pid} = couch_replicator:async_replicate(Rep),
+-    MonRef = erlang:monitor(process, Pid),
+-    receive
+-    {'DOWN', MonRef, process, Pid, Reason} ->
+-        etap:is(Reason, normal, "Replication finished successfully")
+-    after 300000 ->
+-        etap:bail("Timeout waiting for replication to finish")
+-    end.
+diff --git a/src/couch_replicator/test/07-use-checkpoints.t b/src/couch_replicator/test/07-use-checkpoints.t
+deleted file mode 100755
+index a3295c7..0000000
+--- a/src/couch_replicator/test/07-use-checkpoints.t
++++ /dev/null
+@@ -1,273 +0,0 @@
+-#!/usr/bin/env escript
+-%% -*- erlang -*-
+-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-% use this file except in compliance with the License. You may obtain a copy of
+-% the License at
+-%
+-%   http://www.apache.org/licenses/LICENSE-2.0
+-%
+-% Unless required by applicable law or agreed to in writing, software
+-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-% License for the specific language governing permissions and limitations under
+-% the License.
+-
+-% Verify that compacting databases that are being used as the source or
+-% target of a replication doesn't affect the replication and that the
+-% replication doesn't hold their reference counters forever.
+-
+--define(b2l(B), binary_to_list(B)).
+-
+--record(user_ctx, {
+-    name = null,
+-    roles = [],
+-    handler
+-}).
+-
+--record(doc, {
+-    id = <<"">>,
+-    revs = {0, []},
+-    body = {[]},
+-    atts = [],
+-    deleted = false,
+-    meta = []
+-}).
+-
+--record(db, {
+-    main_pid = nil,
+-    update_pid = nil,
+-    compactor_pid = nil,
+-    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+-    fd,
+-    updater_fd,
+-    fd_ref_counter,
+-    header = nil,
+-    committed_update_seq,
+-    fulldocinfo_by_id_btree,
+-    docinfo_by_seq_btree,
+-    local_docs_btree,
+-    update_seq,
+-    name,
+-    filepath,
+-    validate_doc_funs = [],
+-    security = [],
+-    security_ptr = nil,
+-    user_ctx = #user_ctx{},
+-    waiting_delayed_commit = nil,
+-    revs_limit = 1000,
+-    fsync_options = [],
+-    options = [],
+-    compression,
+-    before_doc_update,
+-    after_doc_read
+-}).
+-
+--record(rep, {
+-    id,
+-    source,
+-    target,
+-    options,
+-    user_ctx,
+-    doc_id
+-}).
+-
+-
+-source_db_name() -> <<"couch_test_rep_db_a">>.
+-target_db_name() -> <<"couch_test_rep_db_b">>.
+-
+-
+-main(_) ->
+-    test_util:init_code_path(),
+-
+-    etap:plan(16),
+-    case (catch test()) of
+-        ok ->
+-            etap:end_tests();
+-        Other ->
+-            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+-            etap:bail(Other)
+-    end,
+-    ok.
+-
+-
+-test() ->
+-    couch_server_sup:start_link(test_util:config_files()),
+-    ibrowse:start(),
+-
+-    % order matters
+-    test_use_checkpoints(false),
+-    test_use_checkpoints(true),
+-
+-    couch_server_sup:stop(),
+-    ok.
+-
+-
+-test_use_checkpoints(UseCheckpoints) ->
+-    Pairs = [
+-        {source_db_name(), target_db_name()},
+-        {{remote, source_db_name()}, target_db_name()},
+-        {source_db_name(), {remote, target_db_name()}},
+-        {{remote, source_db_name()}, {remote, (target_db_name())}}
+-    ],
+-
+-    ListenerFun = case UseCheckpoints of
+-    false ->
+-        fun({finished, _, {CheckpointHistory}}) ->
+-            etap:is(CheckpointHistory,
+-            [{<<"use_checkpoints">>,false}],
+-            "No checkpoints found");
+-        (_) ->
+-            ok
+-        end;
+-    true ->
+-        fun({finished, _, {CheckpointHistory}}) ->
+-            SessionId = lists:keyfind(
+-                <<"session_id">>, 1, CheckpointHistory),
+-            case SessionId of
+-                false ->
+-                    OtpRel = erlang:system_info(otp_release),
+-                    case OtpRel >= "R14B01" orelse OtpRel < "R14B03" of
+-                        false ->
+-                            etap:bail("Checkpoint expected, but not found");
+-                        true ->
+-                            etap:ok(true,
+-                                " Checkpoint expected, but wan't found."
+-                                " Your Erlang " ++ OtpRel ++ " version is"
+-                                " affected to OTP-9167 issue which causes"
+-                                " failure of this test. Try to upgrade Erlang"
+-                                " and if this failure repeats file the bug.")
+-                    end;
+-                _ ->
+-                    etap:ok(true, "There's a checkpoint")
+-            end;
+-        (_) ->
+-            ok
+-        end
+-    end,
+-    {ok, Listener} = couch_replicator_notifier:start_link(ListenerFun),
+-
+-    lists:foreach(
+-        fun({Source, Target}) ->
+-            {ok, SourceDb} = create_db(source_db_name()),
+-            etap:diag("Populating source database"),
+-            populate_db(SourceDb, 100),
+-            ok = couch_db:close(SourceDb),
+-
+-            etap:diag("Creating target database"),
+-            {ok, TargetDb} = create_db(target_db_name()),
+-            ok = couch_db:close(TargetDb),
+-
+-            etap:diag("Setup replicator notifier listener"),
+-
+-            etap:diag("Triggering replication"),
+-            replicate(Source, Target, UseCheckpoints),
+-
+-            etap:diag("Replication finished, comparing source and target databases"),
+-            compare_dbs(SourceDb, TargetDb),
+-
+-            etap:diag("Deleting databases"),
+-            delete_db(TargetDb),
+-            delete_db(SourceDb),
+-
+-            ok = timer:sleep(1000)
+-        end,
+-        Pairs),
+-
+-    couch_replicator_notifier:stop(Listener).
+-
+-
+-populate_db(Db, DocCount) ->
+-    Docs = lists:foldl(
+-        fun(DocIdCounter, Acc) ->
+-            Id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
+-            Value = iolist_to_binary(["val", integer_to_list(DocIdCounter)]),
+-            Doc = #doc{
+-                id = Id,
+-                body = {[ {<<"value">>, Value} ]}
+-            },
+-            [Doc | Acc]
+-        end,
+-        [], lists:seq(1, DocCount)),
+-    {ok, _} = couch_db:update_docs(Db, Docs, []).
+-
+-
+-compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
+-    {ok, SourceDb} = couch_db:open_int(SourceName, []),
+-    {ok, TargetDb} = couch_db:open_int(TargetName, []),
+-    Fun = fun(FullDocInfo, _, Acc) ->
+-        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+-        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+-        DocId = couch_util:get_value(<<"_id">>, Props),
+-        DocTarget = case couch_db:open_doc(TargetDb, DocId) of
+-        {ok, DocT} ->
+-            DocT;
+-        Error ->
+-            etap:bail("Error opening document '" ++ ?b2l(DocId) ++
+-                "' from target: " ++ couch_util:to_list(Error))
+-        end,
+-        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+-        case DocTargetJson of
+-        DocJson ->
+-            ok;
+-        _ ->
+-            etap:bail("Content from document '" ++ ?b2l(DocId) ++
+-                "' differs in target database")
+-        end,
+-        {ok, Acc}
+-    end,
+-    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+-    etap:diag("Target database has the same documents as the source database"),
+-    ok = couch_db:close(SourceDb),
+-    ok = couch_db:close(TargetDb).
+-
+-
+-db_url(DbName) ->
+-    iolist_to_binary([
+-        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+-        "/", DbName
+-    ]).
+-
+-
+-create_db(DbName) ->
+-    {ok, Db} = couch_db:create(
+-        DbName,
+-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
+-    couch_db:close(Db),
+-    {ok, Db}.
+-
+-
+-delete_db(#db{name = DbName, main_pid = Pid}) ->
+-    ok = couch_server:delete(
+-        DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
+-    MonRef = erlang:monitor(process, Pid),
+-    receive
+-    {'DOWN', MonRef, process, Pid, _Reason} ->
+-        ok
+-    after 30000 ->
+-        etap:bail("Timeout deleting database")
+-    end.
+-
+-
+-replicate({remote, Db}, Target, UseCheckpoints) ->
+-    replicate(db_url(Db), Target, UseCheckpoints);
+-
+-replicate(Source, {remote, Db}, UseCheckpoints) ->
+-    replicate(Source, db_url(Db), UseCheckpoints);
+-
+-replicate(Source, Target, UseCheckpoints) ->
+-    RepObject = {[
+-        {<<"source">>, Source},
+-        {<<"target">>, Target},
+-        {<<"use_checkpoints">>, UseCheckpoints}
+-    ]},
+-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+-    {ok, Pid} = couch_replicator:async_replicate(Rep),
+-    MonRef = erlang:monitor(process, Pid),
+-    receive
+-    {'DOWN', MonRef, process, Pid, Reason} ->
+-        etap:is(Reason, normal, "Replication finished successfully")
+-    after 300000 ->
+-        etap:bail("Timeout waiting for replication to finish")
+-    end.
+diff --git a/src/couch_replicator/test/couch_replicator_compact_tests.erl b/src/couch_replicator/test/couch_replicator_compact_tests.erl
+new file mode 100644
+index 0000000..05b368e
+--- /dev/null
++++ b/src/couch_replicator/test/couch_replicator_compact_tests.erl
+@@ -0,0 +1,448 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_replicator_compact_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++-include_lib("couch_replicator/src/couch_replicator.hrl").
++
++-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
++-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
++-define(ATTFILE, filename:join([?FIXTURESDIR, "logo.png"])).
++-define(DELAY, 100).
++-define(TIMEOUT, 30000).
++-define(TIMEOUT_STOP, 1000).
++-define(TIMEOUT_WRITER, 3000).
++-define(TIMEOUT_EUNIT, ?TIMEOUT div 1000 + 5).
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(Db),
++    DbName.
++
++setup(local) ->
++    setup();
++setup(remote) ->
++    {remote, setup()};
++setup({A, B}) ->
++    {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Source = setup(A),
++    Target = setup(B),
++    {Source, Target}.
++
++teardown({remote, DbName}) ->
++    teardown(DbName);
++teardown(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++teardown(_, {Source, Target}) ->
++    teardown(Source),
++    teardown(Target),
++
++    Pid = whereis(couch_server_sup),
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT_STOP ->
++        throw({timeout, server_stop})
++    end.
++
++
++compact_test_() ->
++    Pairs = [{local, local}, {local, remote},
++             {remote, local}, {remote, remote}],
++    {
++        "Compaction during replication tests",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{Pair, fun should_populate_replicate_compact/2}
++             || Pair <- Pairs]
++        }
++    }.
++
++
++should_populate_replicate_compact({From, To}, {Source, Target}) ->
++    {ok, RepPid, RepId} = replicate(Source, Target),
++    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
++     {inorder, [
++         should_run_replication(RepPid, RepId, Source, Target),
++         should_all_processes_be_alive(RepPid, Source, Target),
++         should_populate_and_compact(RepPid, Source, Target, 50, 5),
++         should_wait_target_in_sync(Source, Target),
++         should_ensure_replication_still_running(RepPid, RepId, Source, Target),
++         should_cancel_replication(RepId, RepPid),
++         should_compare_databases(Source, Target)
++     ]}}.
++
++should_all_processes_be_alive(RepPid, Source, Target) ->
++    ?_test(begin
++        {ok, SourceDb} = reopen_db(Source),
++        {ok, TargetDb} = reopen_db(Target),
++        ?assert(is_process_alive(RepPid)),
++        ?assert(is_process_alive(SourceDb#db.main_pid)),
++        ?assert(is_process_alive(TargetDb#db.main_pid))
++    end).
++
++should_run_replication(RepPid, RepId, Source, Target) ->
++    ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
++
++should_ensure_replication_still_running(RepPid, RepId, Source, Target) ->
++    ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
++
++check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
++    Source = case Src of
++        {remote, NameSrc} ->
++            <<(db_url(NameSrc))/binary, $/>>;
++        _ ->
++            Src
++    end,
++    Target = case Tgt of
++        {remote, NameTgt} ->
++            <<(db_url(NameTgt))/binary, $/>>;
++        _ ->
++            Tgt
++    end,
++    FullRepId = ?l2b(BaseId ++ Ext),
++    Pid = ?l2b(pid_to_list(RepPid)),
++    [RepTask] = couch_task_status:all(),
++    ?assertEqual(Pid, couch_util:get_value(pid, RepTask)),
++    ?assertEqual(FullRepId, couch_util:get_value(replication_id, RepTask)),
++    ?assertEqual(true, couch_util:get_value(continuous, RepTask)),
++    ?assertEqual(Source, couch_util:get_value(source, RepTask)),
++    ?assertEqual(Target, couch_util:get_value(target, RepTask)),
++    ?assert(is_integer(couch_util:get_value(docs_read, RepTask))),
++    ?assert(is_integer(couch_util:get_value(docs_written, RepTask))),
++    ?assert(is_integer(couch_util:get_value(doc_write_failures, RepTask))),
++    ?assert(is_integer(couch_util:get_value(revisions_checked, RepTask))),
++    ?assert(is_integer(couch_util:get_value(missing_revisions_found, RepTask))),
++    ?assert(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask))),
++    ?assert(is_integer(couch_util:get_value(source_seq, RepTask))),
++    Progress = couch_util:get_value(progress, RepTask),
++    ?assert(is_integer(Progress)),
++    ?assert(Progress =< 100).
++
++should_cancel_replication(RepId, RepPid) ->
++    ?_assertNot(begin
++        {ok, _} = couch_replicator:cancel_replication(RepId),
++        is_process_alive(RepPid)
++    end).
++
++should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(begin
++        {ok, SourceDb0} = reopen_db(Source),
++        Writer = spawn_writer(SourceDb0),
++        lists:foreach(
++            fun(N) ->
++                {ok, SourceDb} = reopen_db(Source),
++                {ok, TargetDb} = reopen_db(Target),
++                pause_writer(Writer),
++
++                compact_db("source", SourceDb),
++                ?assert(is_process_alive(RepPid)),
++                ?assert(is_process_alive(SourceDb#db.main_pid)),
++                check_ref_counter("source", SourceDb),
++
++                compact_db("target", TargetDb),
++                ?assert(is_process_alive(RepPid)),
++                ?assert(is_process_alive(TargetDb#db.main_pid)),
++                check_ref_counter("target", TargetDb),
++
++                {ok, SourceDb2} = reopen_db(SourceDb),
++                {ok, TargetDb2} = reopen_db(TargetDb),
++
++                resume_writer(Writer),
++                wait_writer(Writer, BatchSize * N),
++
++                compact_db("source", SourceDb2),
++                ?assert(is_process_alive(RepPid)),
++                ?assert(is_process_alive(SourceDb2#db.main_pid)),
++                pause_writer(Writer),
++                check_ref_counter("source", SourceDb2),
++                resume_writer(Writer),
++
++                compact_db("target", TargetDb2),
++                ?assert(is_process_alive(RepPid)),
++                ?assert(is_process_alive(TargetDb2#db.main_pid)),
++                pause_writer(Writer),
++                check_ref_counter("target", TargetDb2),
++                resume_writer(Writer)
++            end, lists:seq(1, Rounds)),
++        stop_writer(Writer)
++    end)}.
++
++should_wait_target_in_sync({remote, Source}, Target) ->
++    should_wait_target_in_sync(Source, Target);
++should_wait_target_in_sync(Source, {remote, Target}) ->
++    should_wait_target_in_sync(Source, Target);
++should_wait_target_in_sync(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_assert(begin
++        {ok, SourceDb} = couch_db:open_int(Source, []),
++        {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
++        ok = couch_db:close(SourceDb),
++        SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
++        wait_target_in_sync_loop(SourceDocCount, Target, 300)
++    end)}.
++
++wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
++    erlang:error(
++        {assertion_failed,
++         [{module, ?MODULE}, {line, ?LINE},
++          {reason, "Could not get source and target databases in sync"}]});
++wait_target_in_sync_loop(DocCount, {remote, TargetName}, RetriesLeft) ->
++    wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft);
++wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
++    {ok, Target} = couch_db:open_int(TargetName, []),
++    {ok, TargetInfo} = couch_db:get_db_info(Target),
++    ok = couch_db:close(Target),
++    TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
++    case TargetDocCount == DocCount of
++        true ->
++            true;
++        false ->
++            ok = timer:sleep(?DELAY),
++            wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
++    end.
++
++should_compare_databases({remote, Source}, Target) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, {remote, Target}) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, Target) ->
++    {timeout, 35, ?_test(begin
++        {ok, SourceDb} = couch_db:open_int(Source, []),
++        {ok, TargetDb} = couch_db:open_int(Target, []),
++        Fun = fun(FullDocInfo, _, Acc) ->
++            {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
++            {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
++            DocId = couch_util:get_value(<<"_id">>, Props),
++            DocTarget = case couch_db:open_doc(TargetDb, DocId) of
++                {ok, DocT} ->
++                    DocT;
++                Error ->
++                    erlang:error(
++                        {assertion_failed,
++                         [{module, ?MODULE}, {line, ?LINE},
++                          {reason, lists:concat(["Error opening document '",
++                                                 ?b2l(DocId), "' from target: ",
++                                                 couch_util:to_list(Error)])}]})
++            end,
++            DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
++            ?assertEqual(DocJson, DocTargetJson),
++            {ok, Acc}
++        end,
++        {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
++        ok = couch_db:close(SourceDb),
++        ok = couch_db:close(TargetDb)
++    end)}.
++
++
++reopen_db({remote, Db}) ->
++    reopen_db(Db);
++reopen_db(#db{name=DbName}) ->
++    reopen_db(DbName);
++reopen_db(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    ok = couch_db:close(Db),
++    {ok, Db}.
++
++compact_db(Type, #db{name = Name}) ->
++    {ok, Db} = couch_db:open_int(Name, []),
++    {ok, CompactPid} = couch_db:start_compact(Db),
++    MonRef = erlang:monitor(process, CompactPid),
++    receive
++        {'DOWN', MonRef, process, CompactPid, normal} ->
++            ok;
++        {'DOWN', MonRef, process, CompactPid, Reason} ->
++            erlang:error(
++                {assertion_failed,
++                 [{module, ?MODULE}, {line, ?LINE},
++                  {reason,
++                   lists:concat(["Error compacting ", Type, " database ",
++                                 ?b2l(Name), ": ",
++                                 couch_util:to_list(Reason)])}]})
++    after ?TIMEOUT ->
++        erlang:error(
++            {assertion_failed,
++             [{module, ?MODULE}, {line, ?LINE},
++              {reason, lists:concat(["Compaction for ", Type, " database ",
++                                     ?b2l(Name), " didn't finish"])}]})
++    end,
++    ok = couch_db:close(Db).
++
++check_ref_counter(Type, #db{name = Name, fd_ref_counter = OldRefCounter}) ->
++    MonRef = erlang:monitor(process, OldRefCounter),
++    receive
++        {'DOWN', MonRef, process, OldRefCounter, _} ->
++            ok
++        after ?TIMEOUT ->
++            erlang:error(
++                {assertion_failed,
++                 [{module, ?MODULE}, {line, ?LINE},
++                  {reason, lists:concat(["Old ", Type,
++                                         " database ref counter didn't"
++                                         " terminate"])}]})
++    end,
++    {ok, #db{fd_ref_counter = NewRefCounter} = Db} = couch_db:open_int(Name, []),
++    ok = couch_db:close(Db),
++    ?assertNotEqual(OldRefCounter, NewRefCounter).
++
++db_url(DbName) ->
++    iolist_to_binary([
++        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
++        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++        "/", DbName
++    ]).
++
++replicate({remote, Db}, Target) ->
++    replicate(db_url(Db), Target);
++
++replicate(Source, {remote, Db}) ->
++    replicate(Source, db_url(Db));
++
++replicate(Source, Target) ->
++    RepObject = {[
++        {<<"source">>, Source},
++        {<<"target">>, Target},
++        {<<"continuous">>, true}
++    ]},
++    {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
++    {ok, Pid} = couch_replicator:async_replicate(Rep),
++    {ok, Pid, Rep#rep.id}.
++
++
++wait_writer(Pid, NumDocs) ->
++    case get_writer_num_docs_written(Pid) of
++        N when N >= NumDocs ->
++            ok;
++        _ ->
++            wait_writer(Pid, NumDocs)
++    end.
++
++spawn_writer(Db) ->
++    Parent = self(),
++    Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
++    Pid.
++
++
++pause_writer(Pid) ->
++    Ref = make_ref(),
++    Pid ! {pause, Ref},
++    receive
++        {paused, Ref} ->
++            ok
++    after ?TIMEOUT_WRITER ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Failed to pause source database writer"}]})
++    end.
++
++resume_writer(Pid) ->
++    Ref = make_ref(),
++    Pid ! {continue, Ref},
++    receive
++        {ok, Ref} ->
++            ok
++    after ?TIMEOUT_WRITER ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Failed to pause source database writer"}]})
++    end.
++
++get_writer_num_docs_written(Pid) ->
++    Ref = make_ref(),
++    Pid ! {get_count, Ref},
++    receive
++        {count, Ref, Count} ->
++            Count
++    after ?TIMEOUT_WRITER ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Timeout getting number of documents written"
++                                " from source database writer"}]})
++    end.
++
++stop_writer(Pid) ->
++    Ref = make_ref(),
++    Pid ! {stop, Ref},
++    receive
++        {stopped, Ref, DocsWritten} ->
++            MonRef = erlang:monitor(process, Pid),
++            receive
++                {'DOWN', MonRef, process, Pid, _Reason} ->
++                    DocsWritten
++            after ?TIMEOUT ->
++                erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Timeout stopping source database writer"}]})
++            end
++    after ?TIMEOUT_WRITER ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Timeout stopping source database writer"}]})
++    end.
++
++writer_loop(#db{name = DbName}, Parent, Counter) ->
++    {ok, Data} = file:read_file(?ATTFILE),
++    maybe_pause(Parent, Counter),
++    Doc = couch_doc:from_json_obj({[
++        {<<"_id">>, ?l2b(integer_to_list(Counter + 1))},
++        {<<"value">>, Counter + 1},
++        {<<"_attachments">>, {[
++            {<<"icon1.png">>, {[
++                {<<"data">>, base64:encode(Data)},
++                {<<"content_type">>, <<"image/png">>}
++            ]}},
++            {<<"icon2.png">>, {[
++                {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
++                {<<"content_type">>, <<"image/png">>}
++            ]}}
++        ]}}
++    ]}),
++    maybe_pause(Parent, Counter),
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, _} = couch_db:update_doc(Db, Doc, []),
++    ok = couch_db:close(Db),
++    receive
++        {get_count, Ref} ->
++            Parent ! {count, Ref, Counter + 1},
++            writer_loop(Db, Parent, Counter + 1);
++        {stop, Ref} ->
++            Parent ! {stopped, Ref, Counter + 1}
++    after 0 ->
++        timer:sleep(?DELAY),
++        writer_loop(Db, Parent, Counter + 1)
++    end.
++
++maybe_pause(Parent, Counter) ->
++    receive
++        {get_count, Ref} ->
++            Parent ! {count, Ref, Counter};
++        {pause, Ref} ->
++            Parent ! {paused, Ref},
++            receive
++                {continue, Ref2} ->
++                    Parent ! {ok, Ref2}
++            end
++    after 0 ->
++        ok
++    end.
+diff --git a/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl b/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl
+new file mode 100644
+index 0000000..88534ed
+--- /dev/null
++++ b/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl
+@@ -0,0 +1,189 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_replicator_httpc_pool_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(TIMEOUT, 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    spawn_pool().
++
++teardown(Pool) ->
++    stop_pool(Pool).
++
++
++httpc_pool_test_() ->
++    {
++        "httpc pool tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_block_new_clients_when_full/1,
++                    fun should_replace_worker_on_death/1
++                ]
++            }
++        }
++    }.
++
++
++should_block_new_clients_when_full(Pool) ->
++    ?_test(begin
++        Client1 = spawn_client(Pool),
++        Client2 = spawn_client(Pool),
++        Client3 = spawn_client(Pool),
++
++        ?assertEqual(ok, ping_client(Client1)),
++        ?assertEqual(ok, ping_client(Client2)),
++        ?assertEqual(ok, ping_client(Client3)),
++
++        Worker1 = get_client_worker(Client1, "1"),
++        Worker2 = get_client_worker(Client2, "2"),
++        Worker3 = get_client_worker(Client3, "3"),
++
++        ?assert(is_process_alive(Worker1)),
++        ?assert(is_process_alive(Worker2)),
++        ?assert(is_process_alive(Worker3)),
++
++        ?assertNotEqual(Worker1, Worker2),
++        ?assertNotEqual(Worker2, Worker3),
++        ?assertNotEqual(Worker3, Worker1),
++
++        Client4 = spawn_client(Pool),
++        ?assertEqual(timeout, ping_client(Client4)),
++
++        ?assertEqual(ok, stop_client(Client1)),
++        ?assertEqual(ok, ping_client(Client4)),
++
++        Worker4 = get_client_worker(Client4, "4"),
++        ?assertEqual(Worker1, Worker4),
++
++        lists:foreach(
++            fun(C) ->
++                ?assertEqual(ok, stop_client(C))
++            end, [Client2, Client3, Client4])
++    end).
++
++should_replace_worker_on_death(Pool) ->
++    ?_test(begin
++        Client1 = spawn_client(Pool),
++        ?assertEqual(ok, ping_client(Client1)),
++        Worker1 = get_client_worker(Client1, "1"),
++        ?assert(is_process_alive(Worker1)),
++
++        ?assertEqual(ok, kill_client_worker(Client1)),
++        ?assertNot(is_process_alive(Worker1)),
++        ?assertEqual(ok, stop_client(Client1)),
++
++        Client2 = spawn_client(Pool),
++        ?assertEqual(ok, ping_client(Client2)),
++        Worker2 = get_client_worker(Client2, "2"),
++        ?assert(is_process_alive(Worker2)),
++
++        ?assertNotEqual(Worker1, Worker2),
++        ?assertEqual(ok, stop_client(Client2))
++    end).
++
++
++spawn_client(Pool) ->
++    Parent = self(),
++    Ref = make_ref(),
++    Pid = spawn(fun() ->
++        {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
++        loop(Parent, Ref, Worker, Pool)
++    end),
++    {Pid, Ref}.
++
++ping_client({Pid, Ref}) ->
++    Pid ! ping,
++    receive
++        {pong, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++get_client_worker({Pid, Ref}, ClientName) ->
++    Pid ! get_worker,
++    receive
++        {worker, Ref, Worker} ->
++            Worker
++    after ?TIMEOUT ->
++        erlang:error(
++            {assertion_failed,
++             [{module, ?MODULE}, {line, ?LINE},
++              {reason, "Timeout getting client " ++ ClientName ++ " worker"}]})
++    end.
++
++stop_client({Pid, Ref}) ->
++    Pid ! stop,
++    receive
++        {stop, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++kill_client_worker({Pid, Ref}) ->
++    Pid ! get_worker,
++    receive
++        {worker, Ref, Worker} ->
++            exit(Worker, kill),
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++loop(Parent, Ref, Worker, Pool) ->
++    receive
++        ping ->
++            Parent ! {pong, Ref},
++            loop(Parent, Ref, Worker, Pool);
++        get_worker  ->
++            Parent ! {worker, Ref, Worker},
++            loop(Parent, Ref, Worker, Pool);
++        stop ->
++            couch_replicator_httpc_pool:release_worker(Pool, Worker),
++            Parent ! {stop, Ref}
++    end.
++
++spawn_pool() ->
++    Host = couch_config:get("httpd", "bind_address", "127.0.0.1"),
++    Port = couch_config:get("httpd", "port", "5984"),
++    {ok, Pool} = couch_replicator_httpc_pool:start_link(
++        "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]),
++    Pool.
++
++stop_pool(Pool) ->
++    ok = couch_replicator_httpc_pool:stop(Pool).
+diff --git a/src/couch_replicator/test/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/couch_replicator_large_atts_tests.erl
+new file mode 100644
+index 0000000..7c4e334
+--- /dev/null
++++ b/src/couch_replicator/test/couch_replicator_large_atts_tests.erl
+@@ -0,0 +1,218 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_replicator_large_atts_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
++-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
++-define(ATT_SIZE_1, 2 * 1024 * 1024).
++-define(ATT_SIZE_2, round(6.6 * 1024 * 1024)).
++-define(DOCS_COUNT, 11).
++-define(TIMEOUT_EUNIT, 30).
++-define(TIMEOUT_STOP, 1000).
++
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(Db),
++    DbName.
++
++setup(local) ->
++    setup();
++setup(remote) ->
++    {remote, setup()};
++setup({A, B}) ->
++    {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    couch_config:set("attachments", "compressible_types", "text/*", false),
++    Source = setup(A),
++    Target = setup(B),
++    {Source, Target}.
++
++teardown({remote, DbName}) ->
++    teardown(DbName);
++teardown(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++teardown(_, {Source, Target}) ->
++    teardown(Source),
++    teardown(Target),
++
++    Pid = whereis(couch_server_sup),
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT_STOP ->
++        throw({timeout, server_stop})
++    end.
++
++
++large_atts_test_() ->
++    Pairs = [{local, local}, {local, remote},
++             {remote, local}, {remote, remote}],
++    {
++        "Replicate docs with large attachments",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{Pair, fun should_populate_replicate_compact/2}
++             || Pair <- Pairs]
++        }
++    }.
++
++
++should_populate_replicate_compact({From, To}, {Source, Target}) ->
++    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
++     {inorder, [should_populate_source(Source),
++                should_replicate(Source, Target),
++                should_compare_databases(Source, Target)]}}.
++
++should_populate_source({remote, Source}) ->
++    should_populate_source(Source);
++should_populate_source(Source) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, ?DOCS_COUNT))}.
++
++should_replicate({remote, Source}, Target) ->
++    should_replicate(db_url(Source), Target);
++should_replicate(Source, {remote, Target}) ->
++    should_replicate(Source, db_url(Target));
++should_replicate(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
++
++should_compare_databases({remote, Source}, Target) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, {remote, Target}) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
++
++
++populate_db(DbName, DocCount) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    Docs = lists:foldl(
++        fun(DocIdCounter, Acc) ->
++            Doc = #doc{
++                id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
++                body = {[]},
++                atts = [
++                    att(<<"att1">>, ?ATT_SIZE_1, <<"text/plain">>),
++                    att(<<"att2">>, ?ATT_SIZE_2, <<"app/binary">>)
++                ]
++            },
++            [Doc | Acc]
++        end,
++        [], lists:seq(1, DocCount)),
++    {ok, _} = couch_db:update_docs(Db, Docs, []),
++    couch_db:close(Db).
++
++compare_dbs(Source, Target) ->
++    {ok, SourceDb} = couch_db:open_int(Source, []),
++    {ok, TargetDb} = couch_db:open_int(Target, []),
++
++    Fun = fun(FullDocInfo, _, Acc) ->
++        {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
++        Id = DocSource#doc.id,
++
++        {ok, DocTarget} = couch_db:open_doc(TargetDb, Id),
++        ?assertEqual(DocSource#doc.body, DocTarget#doc.body),
++
++        #doc{atts = SourceAtts} = DocSource,
++        #doc{atts = TargetAtts} = DocTarget,
++        ?assertEqual(lists:sort([N || #att{name = N} <- SourceAtts]),
++                     lists:sort([N || #att{name = N} <- TargetAtts])),
++
++        FunCompareAtts = fun(#att{name = AttName} = Att) ->
++            {ok, AttTarget} = find_att(TargetAtts, AttName),
++            SourceMd5 = att_md5(Att),
++            TargetMd5 = att_md5(AttTarget),
++            case AttName of
++                <<"att1">> ->
++                    ?assertEqual(gzip, Att#att.encoding),
++                    ?assertEqual(gzip, AttTarget#att.encoding),
++                    DecSourceMd5 = att_decoded_md5(Att),
++                    DecTargetMd5 = att_decoded_md5(AttTarget),
++                    ?assertEqual(DecSourceMd5, DecTargetMd5);
++                _ ->
++                    ?assertEqual(identity, Att#att.encoding),
++                    ?assertEqual(identity, AttTarget#att.encoding)
++            end,
++            ?assertEqual(SourceMd5, TargetMd5),
++            ?assert(is_integer(Att#att.disk_len)),
++            ?assert(is_integer(Att#att.att_len)),
++            ?assert(is_integer(AttTarget#att.disk_len)),
++            ?assert(is_integer(AttTarget#att.att_len)),
++            ?assertEqual(Att#att.disk_len, AttTarget#att.disk_len),
++            ?assertEqual(Att#att.att_len, AttTarget#att.att_len),
++            ?assertEqual(Att#att.type, AttTarget#att.type),
++            ?assertEqual(Att#att.md5, AttTarget#att.md5)
++        end,
++
++        lists:foreach(FunCompareAtts, SourceAtts),
++
++        {ok, Acc}
++    end,
++
++    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
++    ok = couch_db:close(SourceDb),
++    ok = couch_db:close(TargetDb).
++
++att(Name, Size, Type) ->
++    #att{
++        name = Name,
++        type = Type,
++        att_len = Size,
++        data = fun(Count) -> crypto:rand_bytes(Count) end
++    }.
++
++find_att([], _Name) ->
++    nil;
++find_att([#att{name = Name} = Att | _], Name) ->
++    {ok, Att};
++find_att([_ | Rest], Name) ->
++    find_att(Rest, Name).
++
++att_md5(Att) ->
++    Md50 = couch_doc:att_foldl(
++        Att,
++        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
++        couch_util:md5_init()),
++    couch_util:md5_final(Md50).
++
++att_decoded_md5(Att) ->
++    Md50 = couch_doc:att_foldl_decode(
++        Att,
++        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
++        couch_util:md5_init()),
++    couch_util:md5_final(Md50).
++
++db_url(DbName) ->
++    iolist_to_binary([
++        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
++        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++        "/", DbName
++    ]).
++
++replicate(Source, Target) ->
++    RepObject = {[{<<"source">>, Source}, {<<"target">>, Target}]},
++    {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
++    {ok, Pid} = couch_replicator:async_replicate(Rep),
++    MonRef = erlang:monitor(process, Pid),
++    receive
++        {'DOWN', MonRef, process, Pid, _} ->
++            ok
++    end.
+diff --git a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
+new file mode 100644
+index 0000000..27d51db
+--- /dev/null
++++ b/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
+@@ -0,0 +1,232 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_replicator_many_leaves_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
++-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
++-define(DOCS_CONFLICTS, [
++    {<<"doc1">>, 10},
++    {<<"doc2">>, 100},
++    % > MaxURLlength (7000) / length(DocRevisionString): forces the rev list to be split across multiple GET requests
++    {<<"doc3">>, 210}
++]).
++-define(NUM_ATTS, 2).
++-define(TIMEOUT_STOP, 1000).
++-define(TIMEOUT_EUNIT, 60).
++-define(i2l(I), integer_to_list(I)).
++-define(io2b(Io), iolist_to_binary(Io)).
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(Db),
++    DbName.
++
++setup(local) ->
++    setup();
++setup(remote) ->
++    {remote, setup()};
++setup({A, B}) ->
++    {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Source = setup(A),
++    Target = setup(B),
++    {Source, Target}.
++
++teardown({remote, DbName}) ->
++    teardown(DbName);
++teardown(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++teardown(_, {Source, Target}) ->
++    teardown(Source),
++    teardown(Target),
++
++    Pid = whereis(couch_server_sup),
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT_STOP ->
++        throw({timeout, server_stop})
++    end.
++
++
++docs_with_many_leaves_test_() ->
++    Pairs = [{local, local}, {local, remote},
++             {remote, local}, {remote, remote}],
++    {
++        "Replicate documents with many leaves",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{Pair, fun should_populate_replicate_compact/2}
++             || Pair <- Pairs]
++        }
++    }.
++
++
++should_populate_replicate_compact({From, To}, {Source, Target}) ->
++    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
++     {inorder, [
++        should_populate_source(Source),
++        should_replicate(Source, Target),
++        should_verify_target(Source, Target),
++        should_add_attachments_to_source(Source),
++        should_replicate(Source, Target),
++        should_verify_target(Source, Target)
++     ]}}.
++
++should_populate_source({remote, Source}) ->
++    should_populate_source(Source);
++should_populate_source(Source) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}.
++
++should_replicate({remote, Source}, Target) ->
++    should_replicate(db_url(Source), Target);
++should_replicate(Source, {remote, Target}) ->
++    should_replicate(Source, db_url(Target));
++should_replicate(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
++
++should_verify_target({remote, Source}, Target) ->
++    should_verify_target(Source, Target);
++should_verify_target(Source, {remote, Target}) ->
++    should_verify_target(Source, Target);
++should_verify_target(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(begin
++        {ok, SourceDb} = couch_db:open_int(Source, []),
++        {ok, TargetDb} = couch_db:open_int(Target, []),
++        verify_target(SourceDb, TargetDb, ?DOCS_CONFLICTS),
++        ok = couch_db:close(SourceDb),
++        ok = couch_db:close(TargetDb)
++    end)}.
++
++should_add_attachments_to_source({remote, Source}) ->
++    should_add_attachments_to_source(Source);
++should_add_attachments_to_source(Source) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(begin
++        {ok, SourceDb} = couch_db:open_int(Source, []),
++        add_attachments(SourceDb, ?NUM_ATTS, ?DOCS_CONFLICTS),
++        ok = couch_db:close(SourceDb)
++    end)}.
++
++populate_db(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    lists:foreach(
++       fun({DocId, NumConflicts}) ->
++            Value = <<"0">>,
++            Doc = #doc{
++                id = DocId,
++                body = {[ {<<"value">>, Value} ]}
++            },
++            {ok, _} = couch_db:update_doc(Db, Doc, []),
++            {ok, _} = add_doc_siblings(Db, DocId, NumConflicts)
++        end, ?DOCS_CONFLICTS),
++    couch_db:close(Db).
++
++add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
++    add_doc_siblings(Db, DocId, NumLeaves, [], []).
++
++add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
++    {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
++    {ok, AccRevs};
++
++add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
++    Value = ?l2b(?i2l(NumLeaves)),
++    Rev = couch_util:md5(Value),
++    Doc = #doc{
++        id = DocId,
++        revs = {1, [Rev]},
++        body = {[ {<<"value">>, Value} ]}
++    },
++    add_doc_siblings(Db, DocId, NumLeaves - 1,
++                     [Doc | AccDocs], [{1, Rev} | AccRevs]).
++
++verify_target(_SourceDb, _TargetDb, []) ->
++    ok;
++verify_target(SourceDb, TargetDb, [{DocId, NumConflicts} | Rest]) ->
++    {ok, SourceLookups} = couch_db:open_doc_revs(
++        SourceDb,
++        DocId,
++        all,
++        [conflicts, deleted_conflicts]),
++    {ok, TargetLookups} = couch_db:open_doc_revs(
++        TargetDb,
++        DocId,
++        all,
++        [conflicts, deleted_conflicts]),
++    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
++    TargetDocs = [Doc || {ok, Doc} <- TargetLookups],
++    Total = NumConflicts + 1,
++    ?assertEqual(Total, length(TargetDocs)),
++    lists:foreach(
++        fun({SourceDoc, TargetDoc}) ->
++            SourceJson = couch_doc:to_json_obj(SourceDoc, [attachments]),
++            TargetJson = couch_doc:to_json_obj(TargetDoc, [attachments]),
++            ?assertEqual(SourceJson, TargetJson)
++        end,
++        lists:zip(SourceDocs, TargetDocs)),
++    verify_target(SourceDb, TargetDb, Rest).
++
++add_attachments(_SourceDb, _NumAtts,  []) ->
++    ok;
++add_attachments(SourceDb, NumAtts,  [{DocId, NumConflicts} | Rest]) ->
++    {ok, SourceLookups} = couch_db:open_doc_revs(SourceDb, DocId, all, []),
++    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
++    Total = NumConflicts + 1,
++    ?assertEqual(Total, length(SourceDocs)),
++    NewDocs = lists:foldl(
++        fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
++            NewAtts = lists:foldl(fun(I, AttAcc) ->
++                AttData = crypto:rand_bytes(100),
++                NewAtt = #att{
++                    name = ?io2b(["att_", ?i2l(I), "_",
++                                  couch_doc:rev_to_str({Pos, Rev})]),
++                    type = <<"application/foobar">>,
++                    att_len = byte_size(AttData),
++                    data = AttData
++                },
++                [NewAtt | AttAcc]
++            end, [], lists:seq(1, NumAtts)),
++            [Doc#doc{atts = Atts ++ NewAtts} | Acc]
++        end,
++        [], SourceDocs),
++    {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
++    NewRevs = [R || {ok, R} <- UpdateResults],
++    ?assertEqual(length(NewDocs), length(NewRevs)),
++    add_attachments(SourceDb, NumAtts, Rest).
++
++db_url(DbName) ->
++    iolist_to_binary([
++        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
++        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++        "/", DbName
++    ]).
++
++replicate(Source, Target) ->
++    RepObject = {[
++        {<<"source">>, Source},
++        {<<"target">>, Target}
++    ]},
++    {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
++    {ok, Pid} = couch_replicator:async_replicate(Rep),
++    MonRef = erlang:monitor(process, Pid),
++    receive
++        {'DOWN', MonRef, process, Pid, _} ->
++            ok
++    end.
+diff --git a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
+new file mode 100644
+index 0000000..8c64929
+--- /dev/null
++++ b/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
+@@ -0,0 +1,260 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_replicator_missing_stubs_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
++-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
++-define(REVS_LIMIT, 3).
++-define(TIMEOUT_STOP, 1000).
++-define(TIMEOUT_EUNIT, 30).
++
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(Db),
++    DbName.
++
++setup(local) ->
++    setup();
++setup(remote) ->
++    {remote, setup()};
++setup({A, B}) ->
++    {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Source = setup(A),
++    Target = setup(B),
++    {Source, Target}.
++
++teardown({remote, DbName}) ->
++    teardown(DbName);
++teardown(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++teardown(_, {Source, Target}) ->
++    teardown(Source),
++    teardown(Target),
++
++    Pid = whereis(couch_server_sup),
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT_STOP ->
++        throw({timeout, server_stop})
++    end.
++
++
++missing_stubs_test_() ->
++    Pairs = [{local, local}, {local, remote},
++             {remote, local}, {remote, remote}],
++    {
++        "Replicate docs with missing stubs (COUCHDB-1365)",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{Pair, fun should_replicate_docs_with_missed_att_stubs/2}
++             || Pair <- Pairs]
++        }
++    }.
++
++
++should_replicate_docs_with_missed_att_stubs({From, To}, {Source, Target}) ->
++    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
++     {inorder, [
++        should_populate_source(Source),
++        should_set_target_revs_limit(Target, ?REVS_LIMIT),
++        should_replicate(Source, Target),
++        should_compare_databases(Source, Target),
++        should_update_source_docs(Source, ?REVS_LIMIT * 2),
++        should_replicate(Source, Target),
++        should_compare_databases(Source, Target)
++     ]}}.
++
++should_populate_source({remote, Source}) ->
++    should_populate_source(Source);
++should_populate_source(Source) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}.
++
++should_replicate({remote, Source}, Target) ->
++    should_replicate(db_url(Source), Target);
++should_replicate(Source, {remote, Target}) ->
++    should_replicate(Source, db_url(Target));
++should_replicate(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
++
++should_set_target_revs_limit({remote, Target}, RevsLimit) ->
++    should_set_target_revs_limit(Target, RevsLimit);
++should_set_target_revs_limit(Target, RevsLimit) ->
++    ?_test(begin
++        {ok, Db} = couch_db:open_int(Target, [?ADMIN_USER]),
++        ?assertEqual(ok, couch_db:set_revs_limit(Db, RevsLimit)),
++        ok = couch_db:close(Db)
++    end).
++
++should_compare_databases({remote, Source}, Target) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, {remote, Target}) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
++
++should_update_source_docs({remote, Source}, Times) ->
++    should_update_source_docs(Source, Times);
++should_update_source_docs(Source, Times) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(update_db_docs(Source, Times))}.
++
++
++populate_db(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    AttData = crypto:rand_bytes(6000),
++    Doc = #doc{
++        id = <<"doc1">>,
++        atts = [
++            #att{
++                name = <<"doc1_att1">>,
++                type = <<"application/foobar">>,
++                att_len = byte_size(AttData),
++                data = AttData
++            }
++        ]
++    },
++    {ok, _} = couch_db:update_doc(Db, Doc, []),
++    couch_db:close(Db).
++
++update_db_docs(DbName, Times) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, _, _} = couch_db:enum_docs(
++        Db,
++        fun(FDI, _, Acc) -> db_fold_fun(FDI, Acc) end,
++        {DbName, Times},
++        []),
++    ok = couch_db:close(Db).
++
++db_fold_fun(FullDocInfo, {DbName, Times}) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, Doc} = couch_db:open_doc(Db, FullDocInfo),
++    lists:foldl(
++        fun(_, {Pos, RevId}) ->
++            {ok, Db2} = couch_db:reopen(Db),
++            NewDocVersion = Doc#doc{
++                revs = {Pos, [RevId]},
++                body = {[{<<"value">>, base64:encode(crypto:rand_bytes(100))}]}
++            },
++            {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []),
++            NewRev
++        end,
++        {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
++        lists:seq(1, Times)),
++    ok = couch_db:close(Db),
++    {ok, {DbName, Times}}.
++
++compare_dbs(Source, Target) ->
++    {ok, SourceDb} = couch_db:open_int(Source, []),
++    {ok, TargetDb} = couch_db:open_int(Target, []),
++
++    Fun = fun(FullDocInfo, _, Acc) ->
++        {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo,
++                                            [conflicts, deleted_conflicts]),
++        Id = DocSource#doc.id,
++
++        {ok, DocTarget} = couch_db:open_doc(TargetDb, Id,
++                                            [conflicts, deleted_conflicts]),
++        ?assertEqual(DocSource#doc.body, DocTarget#doc.body),
++
++        ?assertEqual(couch_doc:to_json_obj(DocSource, []),
++                     couch_doc:to_json_obj(DocTarget, [])),
++
++        #doc{atts = SourceAtts} = DocSource,
++        #doc{atts = TargetAtts} = DocTarget,
++        ?assertEqual(lists:sort([N || #att{name = N} <- SourceAtts]),
++                     lists:sort([N || #att{name = N} <- TargetAtts])),
++
++        lists:foreach(
++            fun(#att{name = AttName} = Att) ->
++                {ok, AttTarget} = find_att(TargetAtts, AttName),
++                SourceMd5 = att_md5(Att),
++                TargetMd5 = att_md5(AttTarget),
++                case AttName of
++                    <<"att1">> ->
++                        ?assertEqual(gzip, Att#att.encoding),
++                        ?assertEqual(gzip, AttTarget#att.encoding),
++                        DecSourceMd5 = att_decoded_md5(Att),
++                        DecTargetMd5 = att_decoded_md5(AttTarget),
++                        ?assertEqual(DecSourceMd5, DecTargetMd5);
++                    _ ->
++                        ?assertEqual(identity, Att#att.encoding),
++                        ?assertEqual(identity, AttTarget#att.encoding)
++                end,
++                ?assertEqual(SourceMd5, TargetMd5),
++                ?assert(is_integer(Att#att.disk_len)),
++                ?assert(is_integer(Att#att.att_len)),
++                ?assert(is_integer(AttTarget#att.disk_len)),
++                ?assert(is_integer(AttTarget#att.att_len)),
++                ?assertEqual(Att#att.disk_len, AttTarget#att.disk_len),
++                ?assertEqual(Att#att.att_len, AttTarget#att.att_len),
++                ?assertEqual(Att#att.type, AttTarget#att.type),
++                ?assertEqual(Att#att.md5, AttTarget#att.md5)
++            end,
++            SourceAtts),
++        {ok, Acc}
++    end,
++
++    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
++    ok = couch_db:close(SourceDb),
++    ok = couch_db:close(TargetDb).
++
++find_att([], _Name) ->
++    nil;
++find_att([#att{name = Name} = Att | _], Name) ->
++    {ok, Att};
++find_att([_ | Rest], Name) ->
++    find_att(Rest, Name).
++
++att_md5(Att) ->
++    Md50 = couch_doc:att_foldl(
++        Att,
++        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
++        couch_util:md5_init()),
++    couch_util:md5_final(Md50).
++
++att_decoded_md5(Att) ->
++    Md50 = couch_doc:att_foldl_decode(
++        Att,
++        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
++        couch_util:md5_init()),
++    couch_util:md5_final(Md50).
++
++db_url(DbName) ->
++    iolist_to_binary([
++        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
++        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++        "/", DbName
++    ]).
++
++replicate(Source, Target) ->
++    RepObject = {[
++        {<<"source">>, Source},
++        {<<"target">>, Target}
++    ]},
++    {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
++    {ok, Pid} = couch_replicator:async_replicate(Rep),
++    MonRef = erlang:monitor(process, Pid),
++    receive
++        {'DOWN', MonRef, process, Pid, _} ->
++            ok
++    end.
+diff --git a/src/couch_replicator/test/couch_replicator_modules_load_tests.erl b/src/couch_replicator/test/couch_replicator_modules_load_tests.erl
+new file mode 100644
+index 0000000..7107b9e
+--- /dev/null
++++ b/src/couch_replicator/test/couch_replicator_modules_load_tests.erl
+@@ -0,0 +1,40 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_replicator_modules_load_tests).
++
++-include("couch_eunit.hrl").
++
++
++modules_load_test_() ->
++    {
++        "Verify that all modules loads",
++        should_load_modules()
++    }.
++
++
++should_load_modules() ->
++    Modules = [
++        couch_replicator_api_wrap,
++        couch_replicator_httpc,
++        couch_replicator_httpd,
++        couch_replicator_manager,
++        couch_replicator_notifier,
++        couch_replicator,
++        couch_replicator_worker,
++        couch_replicator_utils,
++        couch_replicator_job_sup
++    ],
++    [should_load_module(Mod) || Mod <- Modules].
++
++should_load_module(Mod) ->
++    {atom_to_list(Mod), ?_assertMatch({module, _}, code:load_file(Mod))}.
+diff --git a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
+new file mode 100644
+index 0000000..5356a37
+--- /dev/null
++++ b/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
+@@ -0,0 +1,200 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_replicator_use_checkpoints_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_ROLE, #user_ctx{roles=[<<"_admin">>]}).
++-define(ADMIN_USER, {user_ctx, ?ADMIN_ROLE}).
++-define(DOCS_COUNT, 100).
++-define(TIMEOUT_STOP, 1000).
++-define(TIMEOUT_EUNIT, 30).
++-define(i2l(I), integer_to_list(I)).
++-define(io2b(Io), iolist_to_binary(Io)).
++
++
++start(false) ->
++    fun
++        ({finished, _, {CheckpointHistory}}) ->
++            ?assertEqual([{<<"use_checkpoints">>,false}], CheckpointHistory);
++        (_) ->
++            ok
++    end;
++start(true) ->
++    fun
++        ({finished, _, {CheckpointHistory}}) ->
++            ?assertNotEqual(false, lists:keyfind(<<"session_id">>,
++                                                 1, CheckpointHistory));
++        (_) ->
++            ok
++    end.
++
++stop(_, _) ->
++    ok.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(Db),
++    DbName.
++
++setup(local) ->
++    setup();
++setup(remote) ->
++    {remote, setup()};
++setup({_, Fun, {A, B}}) ->
++    {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    {ok, Listener} = couch_replicator_notifier:start_link(Fun),
++    Source = setup(A),
++    Target = setup(B),
++    {Source, Target, Listener}.
++
++teardown({remote, DbName}) ->
++    teardown(DbName);
++teardown(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++teardown(_, {Source, Target, Listener}) ->
++    teardown(Source),
++    teardown(Target),
++
++    couch_replicator_notifier:stop(Listener),
++    Pid = whereis(couch_server_sup),
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT_STOP ->
++        throw({timeout, server_stop})
++    end.
++
++
++use_checkpoints_test_() ->
++    {
++        "Replication use_checkpoints feature tests",
++        {
++            foreachx,
++            fun start/1, fun stop/2,
++            [{UseCheckpoints, fun use_checkpoints_tests/2}
++             || UseCheckpoints <- [false, true]]
++        }
++    }.
++
++use_checkpoints_tests(UseCheckpoints, Fun) ->
++    Pairs = [{local, local}, {local, remote},
++             {remote, local}, {remote, remote}],
++    {
++        "use_checkpoints: " ++ atom_to_list(UseCheckpoints),
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{{UseCheckpoints, Fun, Pair}, fun should_test_checkpoints/2}
++             || Pair <- Pairs]
++        }
++    }.
++
++should_test_checkpoints({UseCheckpoints, _, {From, To}}, {Source, Target, _}) ->
++    should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}).
++should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}) ->
++    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
++     {inorder, [
++        should_populate_source(Source, ?DOCS_COUNT),
++        should_replicate(Source, Target, UseCheckpoints),
++        should_compare_databases(Source, Target)
++     ]}}.
++
++should_populate_source({remote, Source}, DocCount) ->
++    should_populate_source(Source, DocCount);
++should_populate_source(Source, DocCount) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, DocCount))}.
++
++should_replicate({remote, Source}, Target, UseCheckpoints) ->
++    should_replicate(db_url(Source), Target, UseCheckpoints);
++should_replicate(Source, {remote, Target}, UseCheckpoints) ->
++    should_replicate(Source, db_url(Target), UseCheckpoints);
++should_replicate(Source, Target, UseCheckpoints) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target, UseCheckpoints))}.
++
++should_compare_databases({remote, Source}, Target) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, {remote, Target}) ->
++    should_compare_databases(Source, Target);
++should_compare_databases(Source, Target) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
++
++
++populate_db(DbName, DocCount) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    Docs = lists:foldl(
++        fun(DocIdCounter, Acc) ->
++            Id = ?io2b(["doc", ?i2l(DocIdCounter)]),
++            Value = ?io2b(["val", ?i2l(DocIdCounter)]),
++            Doc = #doc{
++                id = Id,
++                body = {[ {<<"value">>, Value} ]}
++            },
++            [Doc | Acc]
++        end,
++        [], lists:seq(1, DocCount)),
++    {ok, _} = couch_db:update_docs(Db, Docs, []),
++    ok = couch_db:close(Db).
++
++compare_dbs(Source, Target) ->
++    {ok, SourceDb} = couch_db:open_int(Source, []),
++    {ok, TargetDb} = couch_db:open_int(Target, []),
++    Fun = fun(FullDocInfo, _, Acc) ->
++        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
++        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
++        DocId = couch_util:get_value(<<"_id">>, Props),
++        DocTarget = case couch_db:open_doc(TargetDb, DocId) of
++            {ok, DocT} ->
++                DocT;
++            Error ->
++                erlang:error(
++                    {assertion_failed,
++                     [{module, ?MODULE}, {line, ?LINE},
++                      {reason, lists:concat(["Error opening document '",
++                                             ?b2l(DocId), "' from target: ",
++                                             couch_util:to_list(Error)])}]})
++            end,
++        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
++        ?assertEqual(DocJson, DocTargetJson),
++        {ok, Acc}
++    end,
++    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
++    ok = couch_db:close(SourceDb),
++    ok = couch_db:close(TargetDb).
++
++db_url(DbName) ->
++    iolist_to_binary([
++        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
++        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++        "/", DbName
++    ]).
++
++replicate(Source, Target, UseCheckpoints) ->
++    RepObject = {[
++        {<<"source">>, Source},
++        {<<"target">>, Target},
++        {<<"use_checkpoints">>, UseCheckpoints}
++    ]},
++    {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_ROLE),
++    {ok, Pid} = couch_replicator:async_replicate(Rep),
++    MonRef = erlang:monitor(process, Pid),
++    receive
++        {'DOWN', MonRef, process, Pid, _} ->
++            ok
++    end.
+diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl
+index ce45ab8..58204e2 100644
+--- a/src/couchdb/couch_key_tree.erl
++++ b/src/couchdb/couch_key_tree.erl
+@@ -418,5 +418,5 @@ value_pref(Last, _) ->
+     Last.
+ 
+ 
+-% Tests moved to test/etap/06?-*.t
++% Tests moved to test/couchdb/couch_key_tree_tests.erl
+ 
+diff --git a/test/Makefile.am b/test/Makefile.am
+index 7c70a5a..f93baae 100644
+--- a/test/Makefile.am
++++ b/test/Makefile.am
+@@ -10,6 +10,6 @@
+ ## License for the specific language governing permissions and limitations under
+ ## the License.
+ 
+-SUBDIRS = bench etap javascript view_server
++SUBDIRS = bench couchdb javascript view_server
+ EXTRA_DIST = random_port.ini
+ 
+diff --git a/test/couchdb/Makefile.am b/test/couchdb/Makefile.am
+new file mode 100644
+index 0000000..1d9406c
+--- /dev/null
++++ b/test/couchdb/Makefile.am
+@@ -0,0 +1,82 @@
++## Licensed under the Apache License, Version 2.0 (the "License"); you may not
++## use this file except in compliance with the License. You may obtain a copy of
++## the License at
++##
++##   http://www.apache.org/licenses/LICENSE-2.0
++##
++## Unless required by applicable law or agreed to in writing, software
++## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++## License for the specific language governing permissions and limitations under
++## the License.
++
++SUBDIRS = fixtures
++
++noinst_SCRIPTS = run
++
++eunit_files = \
++    couch_auth_cache_tests.erl \
++    couch_btree_tests.erl \
++    couch_changes_tests.erl \
++    couch_config_tests.erl \
++    couch_db_tests.erl \
++    couch_doc_json_tests.erl \
++    couch_file_tests.erl \
++    couch_key_tree_tests.erl \
++    couch_passwords_tests.erl \
++    couch_ref_counter_tests.erl \
++    couch_stream_tests.erl \
++    couch_stats_tests.erl \
++    couch_task_status_tests.erl \
++    couch_util_tests.erl \
++    couch_uuids_tests.erl \
++    couch_work_queue_tests.erl \
++    couchdb_attachments_tests.erl \
++    couchdb_compaction_daemon.erl \
++    couchdb_cors_tests.erl \
++    couchdb_file_compression_tests.erl \
++    couchdb_http_proxy_tests.erl \
++    couchdb_modules_load_tests.erl \
++    couchdb_os_daemons_tests.erl \
++    couchdb_os_proc_pool.erl \
++    couchdb_update_conflicts_tests.erl \
++    couchdb_vhosts_tests.erl \
++    couchdb_views_tests.erl \
++    json_stream_parse_tests.erl \
++    test_request.erl \
++    test_web.erl \
++    include/couch_eunit.hrl
++
++fixture_files = \
++    fixtures/couch_config_tests_1.ini \
++    fixtures/couch_config_tests_2.ini \
++    fixtures/couch_stats_aggregates.cfg \
++    fixtures/couch_stats_aggregates.ini \
++    fixtures/os_daemon_looper.escript \
++    fixtures/os_daemon_configer.escript \
++    fixtures/os_daemon_bad_perm.sh \
++    fixtures/os_daemon_can_reboot.sh \
++    fixtures/os_daemon_die_on_boot.sh \
++    fixtures/os_daemon_die_quickly.sh \
++    fixtures/logo.png \
++    fixtures/3b835456c235b1827e012e25666152f3.view \
++    fixtures/test.couch
++
++EXTRA_DIST = \
++    run.in \
++    eunit.ini \
++    $(eunit_files) \
++    $(fixture_files)
++
++all:
++	@mkdir -p ebin/
++	@mkdir -p temp/
++	$(ERLC) -Wall -I$(top_srcdir)/src -I$(top_srcdir)/test/couchdb/include \
++            -o $(top_builddir)/test/couchdb/ebin/ $(ERLC_FLAGS) ${TEST} \
++            $(top_srcdir)/test/couchdb/test_request.erl \
++            $(top_srcdir)/test/couchdb/test_web.erl
++	chmod +x run
++
++clean-local:
++	rm -rf ebin/
++	rm -rf temp/
+diff --git a/test/couchdb/couch_auth_cache_tests.erl b/test/couchdb/couch_auth_cache_tests.erl
+new file mode 100644
+index 0000000..3b2321c
+--- /dev/null
++++ b/test/couchdb/couch_auth_cache_tests.erl
+@@ -0,0 +1,238 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_auth_cache_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(SALT, <<"SALT">>).
++-define(TIMEOUT, 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    couch_config:set("couch_httpd_auth", "authentication_db",
++                     ?b2l(DbName), false),
++    DbName.
++
++teardown(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++
++couch_auth_cache_test_() ->
++    {
++        "CouchDB auth cache tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_get_nil_on_missed_cache/1,
++                    fun should_get_right_password_hash/1,
++                    fun should_ensure_doc_hash_equals_cached_one/1,
++                    fun should_update_password/1,
++                    fun should_cleanup_cache_after_userdoc_deletion/1,
++                    fun should_restore_cache_after_userdoc_recreation/1,
++                    fun should_drop_cache_on_auth_db_change/1,
++                    fun should_restore_cache_on_auth_db_change/1,
++                    fun should_recover_cache_after_shutdown/1
++                ]
++            }
++        }
++    }.
++
++
++should_get_nil_on_missed_cache(_) ->
++    ?_assertEqual(nil, couch_auth_cache:get_user_creds("joe")).
++
++should_get_right_password_hash(DbName) ->
++    ?_test(begin
++        PasswordHash = hash_password("pass1"),
++        {ok, _} = update_user_doc(DbName, "joe", "pass1"),
++        Creds = couch_auth_cache:get_user_creds("joe"),
++        ?assertEqual(PasswordHash,
++                      couch_util:get_value(<<"password_sha">>, Creds))
++    end).
++
++should_ensure_doc_hash_equals_cached_one(DbName) ->
++    ?_test(begin
++        {ok, _} = update_user_doc(DbName, "joe", "pass1"),
++        Creds = couch_auth_cache:get_user_creds("joe"),
++
++        CachedHash = couch_util:get_value(<<"password_sha">>, Creds),
++        StoredHash = get_user_doc_password_sha(DbName, "joe"),
++        ?assertEqual(StoredHash, CachedHash)
++    end).
++
++should_update_password(DbName) ->
++    ?_test(begin
++        PasswordHash = hash_password("pass2"),
++        {ok, Rev} = update_user_doc(DbName, "joe", "pass1"),
++        {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev),
++        Creds = couch_auth_cache:get_user_creds("joe"),
++        ?assertEqual(PasswordHash,
++                      couch_util:get_value(<<"password_sha">>, Creds))
++    end).
++
++should_cleanup_cache_after_userdoc_deletion(DbName) ->
++    ?_test(begin
++        {ok, _} = update_user_doc(DbName, "joe", "pass1"),
++        delete_user_doc(DbName, "joe"),
++        ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
++    end).
++
++should_restore_cache_after_userdoc_recreation(DbName) ->
++    ?_test(begin
++        PasswordHash = hash_password("pass5"),
++        {ok, _} = update_user_doc(DbName, "joe", "pass1"),
++        delete_user_doc(DbName, "joe"),
++        ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")),
++
++        {ok, _} = update_user_doc(DbName, "joe", "pass5"),
++        Creds = couch_auth_cache:get_user_creds("joe"),
++
++        ?assertEqual(PasswordHash,
++                      couch_util:get_value(<<"password_sha">>, Creds))
++    end).
++
++should_drop_cache_on_auth_db_change(DbName) ->
++    ?_test(begin
++        {ok, _} = update_user_doc(DbName, "joe", "pass1"),
++        full_commit(DbName),
++        couch_config:set("couch_httpd_auth", "authentication_db",
++                         ?b2l(?tempdb()), false),
++        ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
++    end).
++
++should_restore_cache_on_auth_db_change(DbName) ->
++    ?_test(begin
++        PasswordHash = hash_password("pass1"),
++        {ok, _} = update_user_doc(DbName, "joe", "pass1"),
++        Creds = couch_auth_cache:get_user_creds("joe"),
++        full_commit(DbName),
++
++        DbName1 = ?tempdb(),
++        couch_config:set("couch_httpd_auth", "authentication_db",
++                         ?b2l(DbName1), false),
++
++        {ok, _} = update_user_doc(DbName1, "joe", "pass5"),
++        full_commit(DbName1),
++
++        couch_config:set("couch_httpd_auth", "authentication_db",
++                         ?b2l(DbName), false),
++
++        Creds = couch_auth_cache:get_user_creds("joe"),
++        ?assertEqual(PasswordHash,
++                      couch_util:get_value(<<"password_sha">>, Creds))
++    end).
++
++should_recover_cache_after_shutdown(DbName) ->
++    ?_test(begin
++        PasswordHash = hash_password("pass2"),
++        {ok, Rev0} = update_user_doc(DbName, "joe", "pass1"),
++        {ok, Rev1} = update_user_doc(DbName, "joe", "pass2", Rev0),
++        full_commit(DbName),
++        shutdown_db(DbName),
++        {ok, Rev1} = get_doc_rev(DbName, "joe"),
++        ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe"))
++    end).
++
++
++update_user_doc(DbName, UserName, Password) ->
++    update_user_doc(DbName, UserName, Password, nil).
++
++update_user_doc(DbName, UserName, Password, Rev) ->
++    User = iolist_to_binary(UserName),
++    Doc = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
++        {<<"name">>, User},
++        {<<"type">>, <<"user">>},
++        {<<"salt">>, ?SALT},
++        {<<"password_sha">>, hash_password(Password)},
++        {<<"roles">>, []}
++    ] ++ case Rev of
++            nil -> [];
++            _ ->   [{<<"_rev">>, Rev}]
++         end
++    }),
++    {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
++    {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []),
++    ok = couch_db:close(AuthDb),
++    {ok, couch_doc:rev_to_str(NewRev)}.
++
++hash_password(Password) ->
++    ?l2b(couch_util:to_hex(crypto:sha(iolist_to_binary([Password, ?SALT])))).
++
++shutdown_db(DbName) ->
++    {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(AuthDb),
++    couch_util:shutdown_sync(AuthDb#db.main_pid),
++    ok = timer:sleep(1000).
++
++get_doc_rev(DbName, UserName) ->
++    DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
++    {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
++    UpdateRev =
++    case couch_db:open_doc(AuthDb, DocId, []) of
++    {ok, Doc} ->
++        {Props} = couch_doc:to_json_obj(Doc, []),
++        couch_util:get_value(<<"_rev">>, Props);
++    {not_found, missing} ->
++        nil
++    end,
++    ok = couch_db:close(AuthDb),
++    {ok, UpdateRev}.
++
++get_user_doc_password_sha(DbName, UserName) ->
++    DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
++    {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
++    {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
++    ok = couch_db:close(AuthDb),
++    {Props} = couch_doc:to_json_obj(Doc, []),
++    couch_util:get_value(<<"password_sha">>, Props).
++
++delete_user_doc(DbName, UserName) ->
++    DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
++    {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
++    {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
++    {Props} = couch_doc:to_json_obj(Doc, []),
++    DeletedDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, DocId},
++        {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
++        {<<"_deleted">>, true}
++    ]}),
++    {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []),
++    ok = couch_db:close(AuthDb).
++
++full_commit(DbName) ->
++    {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_USER]),
++    {ok, _} = couch_db:ensure_full_commit(AuthDb),
++    ok = couch_db:close(AuthDb).
+diff --git a/test/couchdb/couch_btree_tests.erl b/test/couchdb/couch_btree_tests.erl
+new file mode 100644
+index 0000000..911640f
+--- /dev/null
++++ b/test/couchdb/couch_btree_tests.erl
+@@ -0,0 +1,551 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_btree_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ROWS, 1000).
++
++
++setup() ->
++    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
++    {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none},
++                                             {reduce, fun reduce_fun/2}]),
++    {Fd, Btree}.
++
++setup_kvs(_) ->
++    setup().
++
++setup_red() ->
++    {_, EvenOddKVs} = lists:foldl(
++        fun(Idx, {Key, Acc}) ->
++            case Key of
++                "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
++                _ -> {"even", [{{Key, Idx}, 1} | Acc]}
++            end
++        end, {"odd", []}, lists:seq(1, ?ROWS)),
++    {Fd, Btree} = setup(),
++    {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []),
++    {Fd, Btree1}.
++setup_red(_) ->
++    setup_red().
++
++teardown(Fd) when is_pid(Fd) ->
++    ok = couch_file:close(Fd);
++teardown({Fd, _}) ->
++    teardown(Fd).
++teardown(_, {Fd, _}) ->
++    teardown(Fd).
++
++
++kvs_test_funs() ->
++    [
++        fun should_set_fd_correctly/2,
++        fun should_set_root_correctly/2,
++        fun should_create_zero_sized_btree/2,
++        fun should_set_reduce_option/2,
++        fun should_fold_over_empty_btree/2,
++        fun should_add_all_keys/2,
++        fun should_continuously_add_new_kv/2,
++        fun should_continuously_remove_keys/2,
++        fun should_insert_keys_in_reversed_order/2,
++        fun should_add_every_odd_key_remove_every_even/2,
++        fun should_add_every_even_key_remove_every_old/2
++    ].
++
++red_test_funs() ->
++    [
++        fun should_reduce_whole_range/2,
++        fun should_reduce_first_half/2,
++        fun should_reduce_second_half/2
++    ].
++
++
++btree_open_test_() ->
++    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
++    {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
++    {
++        "Ensure that created btree is really a btree record",
++        ?_assert(is_record(Btree, btree))
++    }.
++
++sorted_kvs_test_() ->
++    Funs = kvs_test_funs(),
++    Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
++    {
++        "BTree with sorted keys",
++        {
++            foreachx,
++            fun setup_kvs/1, fun teardown/2,
++            [{Sorted, Fun} || Fun <- Funs]
++        }
++    }.
++
++rsorted_kvs_test_() ->
++    Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
++    Funs = kvs_test_funs(),
++    Reversed = lists:reverse(Sorted),
++    {
++        "BTree with backward sorted keys",
++        {
++            foreachx,
++            fun setup_kvs/1, fun teardown/2,
++            [{Reversed, Fun} || Fun <- Funs]
++        }
++    }.
++
++shuffled_kvs_test_() ->
++    Funs = kvs_test_funs(),
++    Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
++    Shuffled = shuffle(Sorted),
++    {
++        "BTree with shuffled keys",
++        {
++            foreachx,
++            fun setup_kvs/1, fun teardown/2,
++            [{Shuffled, Fun} || Fun <- Funs]
++        }
++    }.
++
++reductions_test_() ->
++    {
++        "BTree reductions",
++        [
++            {
++                "Common tests",
++                {
++                    foreach,
++                    fun setup_red/0, fun teardown/1,
++                    [
++                        fun should_reduce_without_specified_direction/1,
++                        fun should_reduce_forward/1,
++                        fun should_reduce_backward/1
++                    ]
++                }
++            },
++            {
++                "Range requests",
++                [
++                    {
++                        "Forward direction",
++                        {
++                            foreachx,
++                            fun setup_red/1, fun teardown/2,
++                            [{fwd, F} || F <- red_test_funs()]
++                        }
++                    },
++                    {
++                        "Backward direction",
++                        {
++                            foreachx,
++                            fun setup_red/1, fun teardown/2,
++                            [{rev, F} || F <- red_test_funs()]
++                        }
++                    }
++                ]
++            }
++        ]
++    }.
++
++
++should_set_fd_correctly(_, {Fd, Btree}) ->
++    ?_assertMatch(Fd, Btree#btree.fd).
++
++should_set_root_correctly(_, {_, Btree}) ->
++    ?_assertMatch(nil, Btree#btree.root).
++
++should_create_zero_sized_btree(_, {_, Btree}) ->
++    ?_assertMatch(0, couch_btree:size(Btree)).
++
++should_set_reduce_option(_, {_, Btree}) ->
++    ReduceFun = fun reduce_fun/2,
++    Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
++    ?_assertMatch(ReduceFun, Btree1#btree.reduce).
++
++should_fold_over_empty_btree(_, {_, Btree}) ->
++    {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0),
++    ?_assertEqual(0, EmptyRes).
++
++should_add_all_keys(KeyValues, {Fd, Btree}) ->
++    {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
++    [
++        should_return_complete_btree_on_adding_all_keys(KeyValues, Btree1),
++        should_have_non_zero_size(Btree1),
++        should_have_lesser_size_than_file(Fd, Btree1),
++        should_keep_root_pointer_to_kp_node(Fd, Btree1),
++        should_remove_all_keys(KeyValues, Btree1)
++    ].
++
++should_return_complete_btree_on_adding_all_keys(KeyValues, Btree) ->
++    ?_assert(test_btree(Btree, KeyValues)).
++
++should_have_non_zero_size(Btree) ->
++    ?_assert(couch_btree:size(Btree) > 0).
++
++should_have_lesser_size_than_file(Fd, Btree) ->
++    ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))).
++
++should_keep_root_pointer_to_kp_node(Fd, Btree) ->
++    ?_assertMatch({ok, {kp_node, _}},
++                  couch_file:pread_term(Fd, element(1, Btree#btree.root))).
++
++should_remove_all_keys(KeyValues, Btree) ->
++    Keys = keys(KeyValues),
++    {ok, Btree1} = couch_btree:add_remove(Btree, [], Keys),
++    {
++        "Should remove all the keys",
++        [
++            should_produce_valid_btree(Btree1, []),
++            should_be_empty(Btree1)
++        ]
++    }.
++
++should_continuously_add_new_kv(KeyValues, {_, Btree}) ->
++    {Btree1, _} = lists:foldl(
++        fun(KV, {BtAcc, PrevSize}) ->
++            {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
++            ?assert(couch_btree:size(BtAcc2) > PrevSize),
++            {BtAcc2, couch_btree:size(BtAcc2)}
++        end, {Btree, couch_btree:size(Btree)}, KeyValues),
++    {
++        "Should continuously add key-values to btree",
++        [
++            should_produce_valid_btree(Btree1, KeyValues),
++            should_not_be_empty(Btree1)
++        ]
++    }.
++
++should_continuously_remove_keys(KeyValues, {_, Btree}) ->
++    {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
++    {Btree2, _} = lists:foldl(
++        fun({K, _}, {BtAcc, PrevSize}) ->
++            {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
++            ?assert(couch_btree:size(BtAcc2) < PrevSize),
++            {BtAcc2, couch_btree:size(BtAcc2)}
++        end, {Btree1, couch_btree:size(Btree1)}, KeyValues),
++    {
++        "Should continuously remove keys from btree",
++        [
++            should_produce_valid_btree(Btree2, []),
++            should_be_empty(Btree2)
++        ]
++    }.
++
++should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) ->
++    KeyValuesRev = lists:reverse(KeyValues),
++    {Btree1, _} = lists:foldl(
++        fun(KV, {BtAcc, PrevSize}) ->
++            {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
++            ?assert(couch_btree:size(BtAcc2) > PrevSize),
++            {BtAcc2, couch_btree:size(BtAcc2)}
++        end, {Btree, couch_btree:size(Btree)}, KeyValuesRev),
++    should_produce_valid_btree(Btree1, KeyValues).
++
++should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) ->
++    {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
++    {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
++        case Count rem 2 == 0 of
++            true -> {Count + 1, [X | Left], Right};
++            false -> {Count + 1, Left, [X | Right]}
++        end
++                                            end, {0, [], []}, KeyValues),
++    ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1)).
++
++should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) ->
++    {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
++    {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
++        case Count rem 2 == 0 of
++            true -> {Count + 1, [X | Left], Right};
++            false -> {Count + 1, Left, [X | Right]}
++        end
++                                            end, {0, [], []}, KeyValues),
++    ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0)).
++
++
++should_reduce_without_specified_direction({_, Btree}) ->
++    ?_assertMatch(
++        {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
++        fold_reduce(Btree, [])).
++
++should_reduce_forward({_, Btree}) ->
++    ?_assertMatch(
++        {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
++        fold_reduce(Btree, [{dir, fwd}])).
++
++should_reduce_backward({_, Btree}) ->
++    ?_assertMatch(
++        {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]},
++        fold_reduce(Btree, [{dir, rev}])).
++
++should_reduce_whole_range(fwd, {_, Btree}) ->
++    {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}},
++    [
++        {
++            "include endkey",
++            ?_assertMatch(
++                {ok, [{{"odd", 1}, ?ROWS div 2},
++                      {{"even", 2}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, fwd},
++                                    {start_key, SK},
++                                    {end_key, EK}]))
++        },
++        {
++            "exclude endkey",
++            ?_assertMatch(
++                {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
++                      {{"even", 2}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, fwd},
++                                    {start_key, SK},
++                                    {end_key_gt, EK}]))
++        }
++    ];
++should_reduce_whole_range(rev, {_, Btree}) ->
++    {SK, EK} = {{"odd", ?ROWS - 1}, {"even", 2}},
++    [
++        {
++            "include endkey",
++            ?_assertMatch(
++                {ok, [{{"even", ?ROWS}, ?ROWS div 2},
++                      {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, rev},
++                                    {start_key, SK},
++                                    {end_key, EK}]))
++        },
++        {
++            "exclude endkey",
++            ?_assertMatch(
++                {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
++                      {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, rev},
++                                    {start_key, SK},
++                                    {end_key_gt, EK}]))
++        }
++    ].
++
++should_reduce_first_half(fwd, {_, Btree}) ->
++    {SK, EK} = {{"even", 0}, {"odd", (?ROWS div 2) - 1}},
++    [
++        {
++            "include endkey",
++            ?_assertMatch(
++                {ok, [{{"odd", 1}, ?ROWS div 4},
++                      {{"even", 2}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, fwd},
++                                    {start_key, SK}, {end_key, EK}]))
++        },
++        {
++            "exclude endkey",
++            ?_assertMatch(
++                {ok, [{{"odd", 1}, (?ROWS div 4) - 1},
++                      {{"even", 2}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, fwd},
++                                    {start_key, SK},
++                                    {end_key_gt, EK}]))
++        }
++    ];
++should_reduce_first_half(rev, {_, Btree}) ->
++    {SK, EK} = {{"odd", ?ROWS - 1}, {"even", ?ROWS div 2}},
++    [
++        {
++            "include endkey",
++            ?_assertMatch(
++                {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1},
++                      {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, rev},
++                                    {start_key, SK},
++                                    {end_key, EK}]))
++        },
++        {
++            "exclude endkey",
++            ?_assertMatch(
++                {ok, [{{"even", ?ROWS}, ?ROWS div 4},
++                      {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
++                fold_reduce(Btree, [{dir, rev},
++                                    {start_key, SK},
++                                    {end_key_gt, EK}]))
++        }
++    ].
++
++should_reduce_second_half(fwd, {_, Btree}) ->
++    {SK, EK} = {{"even", ?ROWS div 2}, {"odd", ?ROWS - 1}},
++    [
++        {
++            "include endkey",
++            ?_assertMatch(
++                {ok, [{{"odd", 1}, ?ROWS div 2},
++                      {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
++                fold_reduce(Btree, [{dir, fwd},
++                                    {start_key, SK},
++                                    {end_key, EK}]))
++        },
++        {
++            "exclude endkey",
++            ?_assertMatch(
++                {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
++                      {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
++                fold_reduce(Btree, [{dir, fwd},
++                                    {start_key, SK},
++                                    {end_key_gt, EK}]))
++        }
++    ];
++should_reduce_second_half(rev, {_, Btree}) ->
++    {SK, EK} = {{"odd", (?ROWS div 2) + 1}, {"even", 2}},
++    [
++        {
++            "include endkey",
++            ?_assertMatch(
++                {ok, [{{"even", ?ROWS}, ?ROWS div 2},
++                      {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
++                fold_reduce(Btree, [{dir, rev},
++                                    {start_key, SK},
++                                    {end_key, EK}]))
++        },
++        {
++            "exclude endkey",
++            ?_assertMatch(
++                {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
++                      {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
++                fold_reduce(Btree, [{dir, rev},
++                                    {start_key, SK},
++                                    {end_key_gt, EK}]))
++        }
++    ].
++
++should_produce_valid_btree(Btree, KeyValues) ->
++    ?_assert(test_btree(Btree, KeyValues)).
++
++should_be_empty(Btree) ->
++    ?_assertEqual(couch_btree:size(Btree), 0).
++
++should_not_be_empty(Btree) ->
++    ?_assert(couch_btree:size(Btree) > 0).
++
++fold_reduce(Btree, Opts) ->
++    GroupFun = fun({K1, _}, {K2, _}) ->
++        K1 == K2
++    end,
++    FoldFun = fun(GroupedKey, Unreduced, Acc) ->
++        {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]}
++    end,
++    couch_btree:fold_reduce(Btree, FoldFun, [],
++                            [{key_group_fun, GroupFun}] ++ Opts).
++
++
++keys(KVs) ->
++    [K || {K, _} <- KVs].
++
++reduce_fun(reduce, KVs) ->
++    length(KVs);
++reduce_fun(rereduce, Reds) ->
++    lists:sum(Reds).
++
++
++shuffle(List) ->
++    randomize(round(math:log(length(List)) + 0.5), List).
++
++randomize(1, List) ->
++    randomize(List);
++randomize(T, List) ->
++    lists:foldl(
++        fun(_E, Acc) ->
++            randomize(Acc)
++        end, randomize(List), lists:seq(1, (T - 1))).
++
++randomize(List) ->
++    D = lists:map(fun(A) -> {random:uniform(), A} end, List),
++    {_, D1} = lists:unzip(lists:keysort(1, D)),
++    D1.
++
++test_btree(Btree, KeyValues) ->
++    ok = test_key_access(Btree, KeyValues),
++    ok = test_lookup_access(Btree, KeyValues),
++    ok = test_final_reductions(Btree, KeyValues),
++    ok = test_traversal_callbacks(Btree, KeyValues),
++    true.
++
++test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
++    Btree2 = lists:foldl(
++        fun({K, _}, BtAcc) ->
++            {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
++            BtAcc2
++        end, Btree, OutKeyValues),
++    true = test_btree(Btree2, RemainingKeyValues),
++
++    Btree3 = lists:foldl(
++        fun(KV, BtAcc) ->
++            {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
++            BtAcc2
++        end, Btree2, OutKeyValues),
++    true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
++
++test_key_access(Btree, List) ->
++    FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
++        case Element == HAcc of
++            true -> {ok, {TAcc, Count + 1}};
++            _ -> {ok, {TAcc, Count + 1}}
++        end
++    end,
++    Length = length(List),
++    Sorted = lists:sort(List),
++    {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
++    {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun,
++                                             {Sorted, 0}, [{dir, rev}]),
++    ok.
++
++test_lookup_access(Btree, KeyValues) ->
++    FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
++    lists:foreach(
++        fun({Key, Value}) ->
++            [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
++            {ok, _, true} = couch_btree:foldl(Btree, FoldFun,
++                                              {Key, Value}, [{start_key, Key}])
++        end, KeyValues).
++
++test_final_reductions(Btree, KeyValues) ->
++    KVLen = length(KeyValues),
++    FoldLFun = fun(_X, LeadingReds, Acc) ->
++        CountToStart = KVLen div 3 + Acc,
++        CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
++        {ok, Acc + 1}
++    end,
++    FoldRFun = fun(_X, LeadingReds, Acc) ->
++        CountToEnd = KVLen - KVLen div 3 + Acc,
++        CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
++        {ok, Acc + 1}
++    end,
++    {LStartKey, _} = case KVLen of
++        0 -> {nil, nil};
++        _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
++    end,
++    {RStartKey, _} = case KVLen of
++        0 -> {nil, nil};
++        _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
++    end,
++    {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0,
++                                          [{start_key, LStartKey}]),
++    {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0,
++                                         [{dir, rev}, {start_key, RStartKey}]),
++    KVLen = FoldLRed + FoldRRed,
++    ok.
++
++test_traversal_callbacks(Btree, _KeyValues) ->
++    FoldFun = fun
++        (visit, _GroupedKey, _Unreduced, Acc) ->
++            {ok, Acc andalso false};
++        (traverse, _LK, _Red, Acc) ->
++            {skip, Acc andalso true}
++    end,
++    % With 250 items the root is a kp (key-pointer, i.e. interior) node; always skipping should reduce to true.
++    {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]),
++    ok.
+diff --git a/test/couchdb/couch_changes_tests.erl b/test/couchdb/couch_changes_tests.erl
+new file mode 100644
+index 0000000..a129ba2
+--- /dev/null
++++ b/test/couchdb/couch_changes_tests.erl
+@@ -0,0 +1,612 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_changes_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles = [<<"_admin">>]}}).
++-define(TIMEOUT, 3000).
++-define(TEST_TIMEOUT, 10000).
++
++-record(row, {
++    id,
++    seq,
++    deleted = false
++}).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = create_db(DbName),
++    Revs = [R || {ok, R} <- [
++        save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
++        save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
++        save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
++        save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
++        save_doc(Db, {[{<<"_id">>, <<"doc5">>}]})
++    ]],
++    Rev = lists:nth(3, Revs),
++    {ok, Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}),
++    Revs1 = Revs ++ [Rev1],
++    Revs2 = Revs1 ++ [R || {ok, R} <- [
++        save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
++        save_doc(Db, {[{<<"_id">>, <<"_design/foo">>}]}),
++        save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
++        save_doc(Db, {[{<<"_id">>, <<"doc8">>}]})
++    ]],
++    {DbName, list_to_tuple(Revs2)}.
++
++teardown({DbName, _}) ->
++    delete_db(DbName),
++    ok.
++
++
++changes_test_() ->
++    {
++        "Changes feed",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            [
++                filter_by_doc_id(),
++                filter_by_design(),
++                continuous_feed(),
++                filter_by_custom_function()
++            ]
++        }
++    }.
++
++filter_by_doc_id() ->
++    {
++        "Filter _doc_id",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_filter_by_specific_doc_ids/1,
++                fun should_filter_by_specific_doc_ids_descending/1,
++                fun should_filter_by_specific_doc_ids_with_since/1,
++                fun should_filter_by_specific_doc_ids_no_result/1,
++                fun should_handle_deleted_docs/1
++            ]
++        }
++    }.
++
++filter_by_design() ->
++    {
++        "Filter _design",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_emit_only_design_documents/1
++            ]
++        }
++    }.
++
++filter_by_custom_function() ->
++    {
++        "Filter function",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_receive_heartbeats/1
++            ]
++        }
++    }.
++
++continuous_feed() ->
++    {
++        "Continuous Feed",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_filter_continuous_feed_by_specific_doc_ids/1
++            ]
++        }
++    }.
++
++
++should_filter_by_specific_doc_ids({DbName, _}) ->
++    ?_test(
++        begin
++            ChangesArgs = #changes_args{
++                filter = "_doc_ids"
++            },
++            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
++            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
++            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
++
++            {Rows, LastSeq} = wait_finished(Consumer),
++            {ok, Db} = couch_db:open_int(DbName, []),
++            UpSeq = couch_db:get_update_seq(Db),
++            couch_db:close(Db),
++            stop_consumer(Consumer),
++
++            ?assertEqual(2, length(Rows)),
++            [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
++            ?assertEqual(<<"doc4">>, Id1),
++            ?assertEqual(4, Seq1),
++            ?assertEqual(<<"doc3">>, Id2),
++            ?assertEqual(6, Seq2),
++            ?assertEqual(UpSeq, LastSeq)
++        end).
++
++should_filter_by_specific_doc_ids_descending({DbName, _}) ->
++    ?_test(
++        begin
++            ChangesArgs = #changes_args{
++                filter = "_doc_ids",
++                dir = rev
++            },
++            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
++            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
++            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
++
++            {Rows, LastSeq} = wait_finished(Consumer),
++            {ok, Db} = couch_db:open_int(DbName, []),
++            couch_db:close(Db),
++            stop_consumer(Consumer),
++
++            ?assertEqual(2, length(Rows)),
++            [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
++            ?assertEqual(<<"doc3">>, Id1),
++            ?assertEqual(6, Seq1),
++            ?assertEqual(<<"doc4">>, Id2),
++            ?assertEqual(4, Seq2),
++            ?assertEqual(4, LastSeq)
++        end).
++
++should_filter_by_specific_doc_ids_with_since({DbName, _}) ->
++    ?_test(
++        begin
++            ChangesArgs = #changes_args{
++                filter = "_doc_ids",
++                since = 5
++            },
++            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
++            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
++            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
++
++            {Rows, LastSeq} = wait_finished(Consumer),
++            {ok, Db} = couch_db:open_int(DbName, []),
++            UpSeq = couch_db:get_update_seq(Db),
++            couch_db:close(Db),
++            stop_consumer(Consumer),
++
++            ?assertEqual(1, length(Rows)),
++            [#row{seq = Seq1, id = Id1}] = Rows,
++            ?assertEqual(<<"doc3">>, Id1),
++            ?assertEqual(6, Seq1),
++            ?assertEqual(UpSeq, LastSeq)
++        end).
++
++should_filter_by_specific_doc_ids_no_result({DbName, _}) ->
++    ?_test(
++        begin
++            ChangesArgs = #changes_args{
++                filter = "_doc_ids",
++                since = 6
++            },
++            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
++            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
++            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
++
++            {Rows, LastSeq} = wait_finished(Consumer),
++            {ok, Db} = couch_db:open_int(DbName, []),
++            UpSeq = couch_db:get_update_seq(Db),
++            couch_db:close(Db),
++            stop_consumer(Consumer),
++
++            ?assertEqual(0, length(Rows)),
++            ?assertEqual(UpSeq, LastSeq)
++        end).
++
++should_handle_deleted_docs({DbName, Revs}) ->
++    ?_test(
++        begin
++            Rev3_2 = element(6, Revs),
++            {ok, Db} = couch_db:open_int(DbName, []),
++            {ok, _} = save_doc(
++                Db,
++                {[{<<"_id">>, <<"doc3">>},
++                  {<<"_deleted">>, true},
++                  {<<"_rev">>, Rev3_2}]}),
++
++            ChangesArgs = #changes_args{
++                filter = "_doc_ids",
++                since = 9
++            },
++            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
++            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
++            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
++
++            {Rows, LastSeq} = wait_finished(Consumer),
++            couch_db:close(Db),
++            stop_consumer(Consumer),
++
++            ?assertEqual(1, length(Rows)),
++            ?assertMatch(
++                [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
++                Rows
++            ),
++            ?assertEqual(11, LastSeq)
++        end).
++
++should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
++    ?_test(
++        begin
++            {ok, Db} = couch_db:open_int(DbName, []),
++            ChangesArgs = #changes_args{
++                filter = "_doc_ids",
++                feed = "continuous"
++            },
++            DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
++            Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
++            Consumer = spawn_consumer(DbName, ChangesArgs, Req),
++            pause(Consumer),
++
++            Rows = get_rows(Consumer),
++            ?assertEqual(2, length(Rows)),
++            [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
++            ?assertEqual(<<"doc4">>, Id1),
++            ?assertEqual(4, Seq1),
++            ?assertEqual(<<"doc3">>, Id2),
++            ?assertEqual(6, Seq2),
++
++            clear_rows(Consumer),
++            {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
++            {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
++            unpause(Consumer),
++            pause(Consumer),
++            ?assertEqual([], get_rows(Consumer)),
++
++            Rev4 = element(4, Revs),
++            Rev3_2 = element(6, Revs),
++            {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
++                                          {<<"_rev">>, Rev4}]}),
++            {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
++            {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
++                                     {<<"_rev">>, Rev4_2}]}),
++            {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
++            {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
++                                          {<<"_rev">>, Rev3_2}]}),
++            unpause(Consumer),
++            pause(Consumer),
++
++            NewRows = get_rows(Consumer),
++            ?assertEqual(2, length(NewRows)),
++            [Row14, Row16] = NewRows,
++            ?assertEqual(<<"doc4">>, Row14#row.id),
++            ?assertEqual(15, Row14#row.seq),
++            ?assertEqual(<<"doc3">>, Row16#row.id),
++            ?assertEqual(17, Row16#row.seq),
++
++            clear_rows(Consumer),
++            {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
++                                           {<<"_rev">>, Rev3_3}]}),
++            unpause(Consumer),
++            pause(Consumer),
++
++            FinalRows = get_rows(Consumer),
++
++            unpause(Consumer),
++            stop_consumer(Consumer),
++
++            ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows)
++        end).
++
++should_emit_only_design_documents({DbName, Revs}) ->
++    ?_test(
++        begin
++            ChangesArgs = #changes_args{
++                filter = "_design"
++            },
++            Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
++
++            {Rows, LastSeq} = wait_finished(Consumer),
++            {ok, Db} = couch_db:open_int(DbName, []),
++            UpSeq = couch_db:get_update_seq(Db),
++            couch_db:close(Db),
++
++            ?assertEqual(1, length(Rows)),
++            ?assertEqual(UpSeq, LastSeq),
++            ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows),
++
++            stop_consumer(Consumer),
++
++            {ok, Db2} = couch_db:open_int(DbName, [?ADMIN_USER]),
++            {ok, _} = save_doc(Db2, {[{<<"_id">>, <<"_design/foo">>},
++                                      {<<"_rev">>, element(8, Revs)},
++                                      {<<"_deleted">>, true}]}),
++
++            Consumer2 = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
++
++            {Rows2, LastSeq2} = wait_finished(Consumer2),
++            UpSeq2 = UpSeq + 1,
++            couch_db:close(Db2),
++
++            ?assertEqual(1, length(Rows2)),
++            ?assertEqual(UpSeq2, LastSeq2),
++            ?assertEqual([#row{seq = 11,
++                               id = <<"_design/foo">>,
++                               deleted = true}],
++                          Rows2)
++        end).
++
++should_receive_heartbeats(_) ->
++    {timeout, ?TEST_TIMEOUT div 1000,
++     ?_test(
++         begin
++             DbName = ?tempdb(),
++             Timeout = 100,
++             {ok, Db} = create_db(DbName),
++
++             {ok, _} = save_doc(Db, {[
++                 {<<"_id">>, <<"_design/filtered">>},
++                 {<<"language">>, <<"javascript">>},
++                     {<<"filters">>, {[
++                         {<<"foo">>, <<"function(doc) {
++                             return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">>
++                     }]}}
++             ]}),
++
++             ChangesArgs = #changes_args{
++                 filter = "filtered/foo",
++                 feed = "continuous",
++                 timeout = 10000,
++                 heartbeat = 1000
++             },
++             Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
++
++             {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
++
++             Heartbeats = get_heartbeats(Consumer),
++             ?assert(Heartbeats > 0),
++
++             {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
++
++             Heartbeats2 = get_heartbeats(Consumer),
++             ?assert(Heartbeats2 > Heartbeats),
++
++             Rows = get_rows(Consumer),
++             ?assertEqual(3, length(Rows)),
++
++             {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
++             timer:sleep(Timeout),
++             {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
++             timer:sleep(Timeout),
++
++             Heartbeats3 = get_heartbeats(Consumer),
++             ?assert(Heartbeats3 > Heartbeats2)
++        end)}.
++
++
++save_doc(Db, Json) ->
++    Doc = couch_doc:from_json_obj(Json),
++    {ok, Rev} = couch_db:update_doc(Db, Doc, []),
++    {ok, couch_doc:rev_to_str(Rev)}.
++
++get_rows(Consumer) ->
++    Ref = make_ref(),
++    Consumer ! {get_rows, Ref},
++    Resp = receive
++        {rows, Ref, Rows} ->
++            Rows
++    after ?TIMEOUT ->
++        timeout
++    end,
++    ?assertNotEqual(timeout, Resp),
++    Resp.
++
++get_heartbeats(Consumer) ->
++    Ref = make_ref(),
++    Consumer ! {get_heartbeats, Ref},
++    Resp = receive
++        {hearthbeats, Ref, HeartBeats} ->
++            HeartBeats
++    after ?TIMEOUT ->
++        timeout
++    end,
++    ?assertNotEqual(timeout, Resp),
++    Resp.
++
++clear_rows(Consumer) ->
++    Ref = make_ref(),
++    Consumer ! {reset, Ref},
++    Resp = receive
++        {ok, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end,
++    ?assertNotEqual(timeout, Resp),
++    Resp.
++
++stop_consumer(Consumer) ->
++    Ref = make_ref(),
++    Consumer ! {stop, Ref},
++    Resp = receive
++        {ok, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end,
++    ?assertNotEqual(timeout, Resp),
++    Resp.
++
++pause(Consumer) ->
++    Ref = make_ref(),
++    Consumer ! {pause, Ref},
++    Resp = receive
++        {paused, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end,
++    ?assertNotEqual(timeout, Resp),
++    Resp.
++
++unpause(Consumer) ->
++    Ref = make_ref(),
++    Consumer ! {continue, Ref},
++    Resp = receive
++        {ok, Ref} ->
++            ok
++    after ?TIMEOUT ->
++       timeout
++    end,
++    ?assertNotEqual(timeout, Resp),
++    Resp.
++
++wait_finished(_Consumer) ->
++    Resp = receive
++        {consumer_finished, Rows, LastSeq} ->
++            {Rows, LastSeq}
++    after ?TIMEOUT ->
++        timeout
++    end,
++    ?assertNotEqual(timeout, Resp),
++    Resp.
++
++spawn_consumer(DbName, ChangesArgs0, Req) ->
++    Parent = self(),
++    spawn(fun() ->
++        put(heartbeat_count, 0),
++        Callback = fun
++            ({change, {Change}, _}, _, Acc) ->
++                Id = couch_util:get_value(<<"id">>, Change),
++                Seq = couch_util:get_value(<<"seq">>, Change),
++                Del = couch_util:get_value(<<"deleted">>, Change, false),
++                [#row{id = Id, seq = Seq, deleted = Del} | Acc];
++            ({stop, LastSeq}, _, Acc) ->
++                Parent ! {consumer_finished, lists:reverse(Acc), LastSeq},
++                stop_loop(Parent, Acc);
++            (timeout, _, Acc) ->
++                put(heartbeat_count, get(heartbeat_count) + 1),
++                maybe_pause(Parent, Acc);
++            (_, _, Acc) ->
++                maybe_pause(Parent, Acc)
++        end,
++        {ok, Db} = couch_db:open_int(DbName, []),
++        ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined)
++            andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of
++            true ->
++                ChangesArgs0#changes_args{timeout = 10, heartbeat = 10};
++            false ->
++                ChangesArgs0
++        end,
++        FeedFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
++        try
++            FeedFun({Callback, []})
++        catch throw:{stop, _} ->
++            ok
++        end,
++        catch couch_db:close(Db)
++    end).
++
++maybe_pause(Parent, Acc) ->
++    receive
++        {get_rows, Ref} ->
++            Parent ! {rows, Ref, lists:reverse(Acc)},
++            maybe_pause(Parent, Acc);
++        {get_heartbeats, Ref} ->
++            Parent ! {hearthbeats, Ref, get(heartbeat_count)},
++            maybe_pause(Parent, Acc);
++        {reset, Ref} ->
++            Parent ! {ok, Ref},
++            maybe_pause(Parent, []);
++        {pause, Ref} ->
++            Parent ! {paused, Ref},
++            pause_loop(Parent, Acc);
++        {stop, Ref} ->
++            Parent ! {ok, Ref},
++            throw({stop, Acc});
++        V ->
++            erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {value, V},
++                       {reason, "Received unexpected message"}]})
++    after 0 ->
++        Acc
++    end.
++
++pause_loop(Parent, Acc) ->
++    receive
++        {stop, Ref} ->
++            Parent ! {ok, Ref},
++            throw({stop, Acc});
++        {reset, Ref} ->
++            Parent ! {ok, Ref},
++            pause_loop(Parent, []);
++        {continue, Ref} ->
++            Parent ! {ok, Ref},
++            Acc;
++        {get_rows, Ref} ->
++            Parent ! {rows, Ref, lists:reverse(Acc)},
++            pause_loop(Parent, Acc)
++    end.
++
++stop_loop(Parent, Acc) ->
++    receive
++        {get_rows, Ref} ->
++            Parent ! {rows, Ref, lists:reverse(Acc)},
++            stop_loop(Parent, Acc);
++        {stop, Ref} ->
++            Parent ! {ok, Ref},
++            Acc
++    end.
++
++create_db(DbName) ->
++    couch_db:create(DbName, [?ADMIN_USER, overwrite]).
++
++delete_db(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]).
+diff --git a/test/couchdb/couch_config_tests.erl b/test/couchdb/couch_config_tests.erl
+new file mode 100644
+index 0000000..9e9dfe7
+--- /dev/null
++++ b/test/couchdb/couch_config_tests.erl
+@@ -0,0 +1,463 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_config_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(SHORT_TIMEOUT, 100).
++-define(TIMEOUT, 1000).
++
++-define(CONFIG_DEFAULT,
++        filename:join([?BUILDDIR, "etc", "couchdb", "default_dev.ini"])).
++-define(CONFIG_FIXTURE_1,
++        filename:join([?FIXTURESDIR, "couch_config_tests_1.ini"])).
++-define(CONFIG_FIXTURE_2,
++        filename:join([?FIXTURESDIR, "couch_config_tests_2.ini"])).
++-define(CONFIG_FIXTURE_TEMP,
++    begin
++        FileName = filename:join([?TEMPDIR, "couch_config_temp.ini"]),
++        {ok, Fd} = file:open(FileName, write),
++        ok = file:truncate(Fd),
++        ok = file:close(Fd),
++        FileName
++    end).
++
++
++setup() ->
++    setup(?CONFIG_CHAIN).
++setup({temporary, Chain}) ->
++    setup(Chain);
++setup({persistent, Chain}) ->
++    setup(lists:append(Chain, [?CONFIG_FIXTURE_TEMP]));
++setup(Chain) ->
++    {ok, Pid} = couch_config:start_link(Chain),
++    Pid.
++
++setup_empty() ->
++    setup([]).
++
++setup_register() ->
++    ConfigPid = setup(),
++    SentinelFunc = fun() ->
++        % Ping/Pong to make sure we wait for this
++        % process to die
++        receive
++            {ping, From} ->
++                From ! pong
++        end
++    end,
++    SentinelPid = spawn(SentinelFunc),
++    {ConfigPid, SentinelPid}.
++
++teardown({ConfigPid, SentinelPid}) ->
++    teardown(ConfigPid),
++    case process_info(SentinelPid) of
++        undefined -> ok;
++        _ ->
++            SentinelPid ! {ping, self()},
++            receive
++                pong ->
++                    ok
++            after 100 ->
++                throw({timeout_error, registered_pid})
++            end
++    end;
++teardown(Pid) ->
++    couch_config:stop(),
++    erlang:monitor(process, Pid),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout_error, config_stop})
++    end.
++teardown(_, Pid) ->
++    teardown(Pid).
++
++
++couch_config_test_() ->
++    {
++        "CouchDB config tests",
++        [
++            couch_config_get_tests(),
++            couch_config_set_tests(),
++            couch_config_del_tests(),
++            config_override_tests(),
++            config_persistent_changes_tests(),
++            config_register_tests(),
++            config_no_files_tests()
++        ]
++    }.
++
++couch_config_get_tests() ->
++    {
++        "Config get tests",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                should_load_all_configs(),
++                should_locate_daemons_section(),
++                should_locate_mrview_handler(),
++                should_return_undefined_atom_on_missed_section(),
++                should_return_undefined_atom_on_missed_option(),
++                should_return_custom_default_value_on_missed_option(),
++                should_only_return_default_on_missed_option(),
++                should_get_binary_option()
++            ]
++        }
++    }.
++
++couch_config_set_tests() ->
++    {
++        "Config set tests",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                should_update_option(),
++                should_create_new_section(),
++                should_set_binary_option()
++            ]
++        }
++    }.
++
++couch_config_del_tests() ->
++    {
++        "Config deletion tests",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                should_return_undefined_atom_after_option_deletion(),
++                should_be_ok_on_deleting_unknown_options(),
++                should_delete_binary_option()
++            ]
++        }
++    }.
++
++config_override_tests() ->
++    {
++        "Configs override tests",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [
++                {{temporary, [?CONFIG_DEFAULT]},
++                 fun should_ensure_in_defaults/2},
++                {{temporary, [?CONFIG_DEFAULT, ?CONFIG_FIXTURE_1]},
++                 fun should_override_options/2},
++                {{temporary, [?CONFIG_DEFAULT, ?CONFIG_FIXTURE_2]},
++                 fun should_create_new_sections_on_override/2},
++                {{temporary, [?CONFIG_DEFAULT, ?CONFIG_FIXTURE_1,
++                              ?CONFIG_FIXTURE_2]},
++                 fun should_win_last_in_chain/2}
++            ]
++        }
++    }.
++
++config_persistent_changes_tests() ->
++    {
++        "Config persistent changes",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [
++                {{persistent, [?CONFIG_DEFAULT]},
++                 fun should_write_changes/2},
++                {{temporary, [?CONFIG_DEFAULT]},
++                 fun should_ensure_that_default_wasnt_modified/2},
++                {{temporary, [?CONFIG_FIXTURE_TEMP]},
++                 fun should_ensure_that_written_to_last_config_in_chain/2}
++            ]
++        }
++    }.
++
++config_register_tests() ->
++    {
++        "Config changes subscriber",
++        {
++            foreach,
++            fun setup_register/0, fun teardown/1,
++            [
++                fun should_handle_port_changes/1,
++                fun should_pass_persistent_flag/1,
++                fun should_not_trigger_handler_on_other_options_changes/1,
++                fun should_not_trigger_handler_after_related_process_death/1
++            ]
++        }
++    }.
++
++config_no_files_tests() ->
++    {
++        "Test couch_config with no files",
++        {
++            foreach,
++            fun setup_empty/0, fun teardown/1,
++            [
++                should_ensure_that_no_ini_files_loaded(),
++                should_create_non_persistent_option(),
++                should_create_persistent_option()
++            ]
++        }
++    }.
++
++
++should_load_all_configs() ->
++    ?_assert(length(couch_config:all()) > 0).
++
++should_locate_daemons_section() ->
++    ?_assert(length(couch_config:get("daemons")) > 0).
++
++should_locate_mrview_handler() ->
++    ?_assertEqual("{couch_mrview_http, handle_view_req}",
++                  couch_config:get("httpd_design_handlers", "_view")).
++
++should_return_undefined_atom_on_missed_section() ->
++    ?_assertEqual(undefined,
++                  couch_config:get("foo", "bar")).
++
++should_return_undefined_atom_on_missed_option() ->
++    ?_assertEqual(undefined,
++                  couch_config:get("httpd", "foo")).
++
++should_return_custom_default_value_on_missed_option() ->
++    ?_assertEqual("bar",
++                  couch_config:get("httpd", "foo", "bar")).
++
++should_only_return_default_on_missed_option() ->
++    ?_assertEqual("0",
++                  couch_config:get("httpd", "port", "bar")).
++
++should_get_binary_option() ->
++    ?_assertEqual(<<"baz">>,
++                  couch_config:get(<<"foo">>, <<"bar">>, <<"baz">>)).
++
++should_update_option() ->
++    ?_assertEqual("severe",
++        begin
++            ok = couch_config:set("log", "level", "severe", false),
++            couch_config:get("log", "level")
++        end).
++
++should_create_new_section() ->
++    ?_assertEqual("bang",
++        begin
++            undefined = couch_config:get("new_section", "bizzle"),
++            ok = couch_config:set("new_section", "bizzle", "bang", false),
++            couch_config:get("new_section", "bizzle")
++        end).
++
++should_set_binary_option() ->
++    ?_assertEqual(<<"baz">>,
++        begin
++            ok = couch_config:set(<<"foo">>, <<"bar">>, <<"baz">>, false),
++            couch_config:get(<<"foo">>, <<"bar">>)
++        end).
++
++should_return_undefined_atom_after_option_deletion() ->
++    ?_assertEqual(undefined,
++        begin
++            ok = couch_config:delete("log", "level", false),
++            couch_config:get("log", "level")
++        end).
++
++should_be_ok_on_deleting_unknown_options() ->
++    ?_assertEqual(ok, couch_config:delete("zoo", "boo", false)).
++
++should_delete_binary_option() ->
++    ?_assertEqual(undefined,
++        begin
++            ok = couch_config:set(<<"foo">>, <<"bar">>, <<"baz">>, false),
++            ok = couch_config:delete(<<"foo">>, <<"bar">>, false),
++            couch_config:get(<<"foo">>, <<"bar">>)
++        end).
++
++should_ensure_in_defaults(_, _) ->
++    ?_test(begin
++        ?assertEqual("100",
++                     couch_config:get("couchdb", "max_dbs_open")),
++        ?assertEqual("5984",
++                     couch_config:get("httpd", "port")),
++        ?assertEqual(undefined,
++                     couch_config:get("fizbang", "unicode"))
++    end).
++
++should_override_options(_, _) ->
++    ?_test(begin
++        ?assertEqual("10",
++                     couch_config:get("couchdb", "max_dbs_open")),
++        ?assertEqual("4895",
++                     couch_config:get("httpd", "port"))
++    end).
++
++should_create_new_sections_on_override(_, _) ->
++    ?_test(begin
++        ?assertEqual("80",
++                     couch_config:get("httpd", "port")),
++        ?assertEqual("normalized",
++                     couch_config:get("fizbang", "unicode"))
++    end).
++
++should_win_last_in_chain(_, _) ->
++    ?_assertEqual("80", couch_config:get("httpd", "port")).
++
++should_write_changes(_, _) ->
++    ?_test(begin
++        ?assertEqual("5984",
++                     couch_config:get("httpd", "port")),
++        ?assertEqual(ok,
++                     couch_config:set("httpd", "port", "8080")),
++        ?assertEqual("8080",
++                     couch_config:get("httpd", "port")),
++        ?assertEqual(ok,
++                     couch_config:delete("httpd", "bind_address", "8080")),
++        ?assertEqual(undefined,
++                     couch_config:get("httpd", "bind_address"))
++    end).
++
++should_ensure_that_default_wasnt_modified(_, _) ->
++    ?_test(begin
++        ?assertEqual("5984",
++                     couch_config:get("httpd", "port")),
++        ?assertEqual("127.0.0.1",
++                     couch_config:get("httpd", "bind_address"))
++    end).
++
++should_ensure_that_written_to_last_config_in_chain(_, _) ->
++    ?_test(begin
++        ?assertEqual("8080",
++                     couch_config:get("httpd", "port")),
++        ?assertEqual(undefined,
++                     couch_config:get("httpd", "bind_address"))
++    end).
++
++should_handle_port_changes({_, SentinelPid}) ->
++    ?_assert(begin
++        MainProc = self(),
++        Port = "8080",
++
++        couch_config:register(
++            fun("httpd", "port", Value) ->
++                % couch_config catches every error raised from handler
++                % so it's not possible to just assert on wrong value.
++                % We have to return the result as message
++                MainProc ! (Value =:= Port)
++            end,
++            SentinelPid
++        ),
++        ok = couch_config:set("httpd", "port", Port, false),
++
++        receive
++            R ->
++                R
++        after ?TIMEOUT ->
++             erlang:error({assertion_failed,
++                           [{module, ?MODULE},
++                            {line, ?LINE},
++                            {reason, "Timeout"}]})
++        end
++    end).
++
++should_pass_persistent_flag({_, SentinelPid}) ->
++    ?_assert(begin
++        MainProc = self(),
++
++        couch_config:register(
++            fun("httpd", "port", _, Persist) ->
++                % couch_config catches every error raised from handler
++                % so it's not possible to just assert on wrong value.
++                % We have to return the result as message
++                MainProc ! Persist
++            end,
++            SentinelPid
++        ),
++        ok = couch_config:set("httpd", "port", "8080", false),
++
++        receive
++            false ->
++                true
++        after ?SHORT_TIMEOUT ->
++            false
++        end
++    end).
++
++should_not_trigger_handler_on_other_options_changes({_, SentinelPid}) ->
++    ?_assert(begin
++        MainProc = self(),
++
++        couch_config:register(
++            fun("httpd", "port", _) ->
++                MainProc ! ok
++            end,
++            SentinelPid
++        ),
++        ok = couch_config:set("httpd", "bind_address", "0.0.0.0", false),
++
++        receive
++            ok ->
++                false
++        after ?SHORT_TIMEOUT ->
++            true
++        end
++    end).
++
++should_not_trigger_handler_after_related_process_death({_, SentinelPid}) ->
++    ?_assert(begin
++        MainProc = self(),
++
++        couch_config:register(
++            fun("httpd", "port", _) ->
++                MainProc ! ok
++            end,
++            SentinelPid
++        ),
++
++        SentinelPid ! {ping, MainProc},
++        receive
++            pong ->
++                ok
++        after ?SHORT_TIMEOUT ->
++             erlang:error({assertion_failed,
++                           [{module, ?MODULE},
++                            {line, ?LINE},
++                            {reason, "Timeout"}]})
++        end,
++
++        ok = couch_config:set("httpd", "port", "12345", false),
++
++        receive
++            ok ->
++                false
++        after ?SHORT_TIMEOUT ->
++            true
++        end
++    end).
++
++should_ensure_that_no_ini_files_loaded() ->
++    ?_assertEqual(0, length(couch_config:all())).
++
++should_create_non_persistent_option() ->
++    ?_assertEqual("80",
++        begin
++            ok = couch_config:set("httpd", "port", "80", false),
++            couch_config:get("httpd", "port")
++        end).
++
++should_create_persistent_option() ->
++    ?_assertEqual("127.0.0.1",
++        begin
++            ok = couch_config:set("httpd", "bind_address", "127.0.0.1"),
++            couch_config:get("httpd", "bind_address")
++        end).
+diff --git a/test/couchdb/couch_db_tests.erl b/test/couchdb/couch_db_tests.erl
+new file mode 100644
+index 0000000..3089714
+--- /dev/null
++++ b/test/couchdb/couch_db_tests.erl
+@@ -0,0 +1,114 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_db_tests).
++
++-include("couch_eunit.hrl").
++
++-define(TIMEOUT, 120).
++
++
++setup() ->
++    {ok, _} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    couch_config:set("log", "include_sasl", "false", false),
++    ok.
++
++teardown(_) ->
++    couch_server_sup:stop().
++
++
++create_delete_db_test_()->
++    {
++        "Database create/delete tests",
++        {
++            setup,
++            fun setup/0, fun teardown/1,
++            fun(_) ->
++                [should_create_db(),
++                 should_delete_db(),
++                 should_create_multiple_dbs(),
++                 should_delete_multiple_dbs(),
++                 should_create_delete_database_continuously()]
++            end
++        }
++    }.
++
++
++should_create_db() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, []),
++    ok = couch_db:close(Db),
++    {ok, AllDbs} = couch_server:all_databases(),
++    ?_assert(lists:member(DbName, AllDbs)).
++
++should_delete_db() ->
++    DbName = ?tempdb(),
++    couch_db:create(DbName, []),
++    couch_server:delete(DbName, []),
++    {ok, AllDbs} = couch_server:all_databases(),
++    ?_assertNot(lists:member(DbName, AllDbs)).
++
++should_create_multiple_dbs() ->
++    gen_server:call(couch_server, {set_max_dbs_open, 3}),
++
++    DbNames = [?tempdb() || _ <- lists:seq(1, 6)],
++    lists:foreach(fun(DbName) ->
++        {ok, Db} = couch_db:create(DbName, []),
++        ok = couch_db:close(Db)
++    end, DbNames),
++
++    {ok, AllDbs} = couch_server:all_databases(),
++    NumCreated = lists:foldl(fun(DbName, Acc) ->
++        ?assert(lists:member(DbName, AllDbs)),
++        Acc+1
++    end, 0, DbNames),
++
++    ?_assertEqual(NumCreated, 6).
++
++should_delete_multiple_dbs() ->
++    DbNames = [?tempdb() || _ <- lists:seq(1, 6)],
++    lists:foreach(fun(DbName) ->
++        {ok, Db} = couch_db:create(DbName, []),
++        ok = couch_db:close(Db)
++    end, DbNames),
++
++    lists:foreach(fun(DbName) ->
++        ok = couch_server:delete(DbName, [])
++    end, DbNames),
++
++    {ok, AllDbs} = couch_server:all_databases(),
++    NumDeleted = lists:foldl(fun(DbName, Acc) ->
++        ?assertNot(lists:member(DbName, AllDbs)),
++        Acc + 1
++    end, 0, DbNames),
++
++    ?_assertEqual(NumDeleted, 6).
++
++should_create_delete_database_continuously() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, []),
++    couch_db:close(Db),
++    [{timeout, ?TIMEOUT, {integer_to_list(N) ++ " times",
++                           ?_assert(loop(DbName, N))}}
++     || N <- [10, 100, 1000]].
++
++loop(_, 0) ->
++    true;
++loop(DbName, N) ->
++    ok = cycle(DbName),
++    loop(DbName, N - 1).
++
++cycle(DbName) ->
++    ok = couch_server:delete(DbName, []),
++    {ok, Db} = couch_db:create(DbName, []),
++    couch_db:close(Db),
++    ok.
+diff --git a/test/couchdb/couch_doc_json_tests.erl b/test/couchdb/couch_doc_json_tests.erl
+new file mode 100644
+index 0000000..1592b6b
+--- /dev/null
++++ b/test/couchdb/couch_doc_json_tests.erl
+@@ -0,0 +1,391 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_doc_json_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++
++setup() ->
++    couch_config:start_link(?CONFIG_CHAIN),
++    couch_config:set("attachments", "compression_level", "0", false),
++    ok.
++
++teardown(_) ->
++    couch_config:stop().
++
++
++json_doc_test_() ->
++    {
++        setup,
++        fun setup/0, fun teardown/1,
++        [
++            {
++                "Document from JSON",
++                [
++                    from_json_success_cases(),
++                    from_json_error_cases()
++                ]
++            },
++            {
++                "Document to JSON",
++                [
++                    to_json_success_cases()
++                ]
++            }
++        ]
++    }.
++
++from_json_success_cases() ->
++    Cases = [
++        {
++            {[]},
++            #doc{},
++            "Return an empty document for an empty JSON object."
++        },
++        {
++            {[{<<"_id">>, <<"zing!">>}]},
++            #doc{id = <<"zing!">>},
++            "Parses document ids."
++        },
++        {
++            {[{<<"_id">>, <<"_design/foo">>}]},
++            #doc{id = <<"_design/foo">>},
++            "_design/document ids."
++        },
++        {
++            {[{<<"_id">>, <<"_local/bam">>}]},
++            #doc{id = <<"_local/bam">>},
++            "_local/document ids."
++        },
++        {
++            {[{<<"_rev">>, <<"4-230234">>}]},
++            #doc{revs = {4, [<<"230234">>]}},
++            "_rev stored in revs."
++        },
++        {
++            {[{<<"soap">>, 35}]},
++            #doc{body = {[{<<"soap">>, 35}]}},
++            "Non underscore prefixed fields stored in body."
++        },
++        {
++            {[{<<"_attachments">>, {[
++                {<<"my_attachment.fu">>, {[
++                    {<<"stub">>, true},
++                    {<<"content_type">>, <<"application/awesome">>},
++                    {<<"length">>, 45}
++                ]}},
++                {<<"noahs_private_key.gpg">>, {[
++                    {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
++                    {<<"content_type">>, <<"application/pgp-signature">>}
++                ]}}
++            ]}}]},
++            #doc{atts = [
++                #att{
++                    name = <<"my_attachment.fu">>,
++                    data = stub,
++                    type = <<"application/awesome">>,
++                    att_len = 45,
++                    disk_len = 45,
++                    revpos = nil
++                },
++                #att{
++                    name = <<"noahs_private_key.gpg">>,
++                    data = <<"I have a pet fish!">>,
++                    type = <<"application/pgp-signature">>,
++                    att_len = 18,
++                    disk_len = 18,
++                    revpos = 0
++                }
++            ]},
++            "Attachments are parsed correctly."
++        },
++        {
++            {[{<<"_deleted">>, true}]},
++            #doc{deleted = true},
++            "_deleted controls the deleted field."
++        },
++        {
++            {[{<<"_deleted">>, false}]},
++            #doc{},
++            "{\"_deleted\": false} is ok."
++        },
++        {
++            {[
++                 {<<"_revisions">>,
++                  {[{<<"start">>, 4},
++                    {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}]}},
++                 {<<"_rev">>, <<"6-something">>}
++             ]},
++            #doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
++            "_revisions attribute are preferred to _rev."
++        },
++        {
++            {[{<<"_revs_info">>, dropping}]},
++            #doc{},
++            "Drops _revs_info."
++        },
++        {
++            {[{<<"_local_seq">>, dropping}]},
++            #doc{},
++            "Drops _local_seq."
++        },
++        {
++            {[{<<"_conflicts">>, dropping}]},
++            #doc{},
++            "Drops _conflicts."
++        },
++        {
++            {[{<<"_deleted_conflicts">>, dropping}]},
++            #doc{},
++            "Drops _deleted_conflicts."
++        }
++    ],
++    lists:map(
++        fun({EJson, Expect, Msg}) ->
++            {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj(EJson))}
++        end,
++        Cases).
++
++from_json_error_cases() ->
++    Cases = [
++        {
++            [],
++            {bad_request, "Document must be a JSON object"},
++            "arrays are invalid"
++        },
++        {
++            4,
++            {bad_request, "Document must be a JSON object"},
++            "integers are invalid"
++        },
++        {
++            true,
++            {bad_request, "Document must be a JSON object"},
++            "literals are invalid"
++        },
++        {
++            {[{<<"_id">>, {[{<<"foo">>, 5}]}}]},
++            {bad_request, <<"Document id must be a string">>},
++            "Document id must be a string."
++        },
++        {
++            {[{<<"_id">>, <<"_random">>}]},
++            {bad_request,
++             <<"Only reserved document ids may start with underscore.">>},
++            "Disallow arbitrary underscore prefixed docids."
++        },
++        {
++            {[{<<"_rev">>, 5}]},
++            {bad_request, <<"Invalid rev format">>},
++            "_rev must be a string"
++        },
++        {
++            {[{<<"_rev">>, "foobar"}]},
++            {bad_request, <<"Invalid rev format">>},
++            "_rev must be %d-%s"
++        },
++        {
++            {[{<<"_rev">>, "foo-bar"}]},
++            "Error if _rev's integer expectation is broken."
++        },
++        {
++            {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
++            {doc_validation, "_revisions.start isn't an integer."},
++            "_revisions.start must be an integer."
++        },
++        {
++            {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, 5}]}}]},
++            {doc_validation, "_revisions.ids isn't a array."},
++            "_revisions.ids must be a list."
++        },
++        {
++            {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, [5]}]}}]},
++            {doc_validation, "RevId isn't a string"},
++            "Revision ids must be strings."
++        },
++        {
++            {[{<<"_something">>, 5}]},
++            {doc_validation, <<"Bad special document member: _something">>},
++            "Underscore prefix fields are reserved."
++        }
++    ],
++
++    lists:map(fun
++        ({EJson, Expect, Msg}) ->
++            Error = (catch couch_doc:from_json_obj(EJson)),
++            {Msg, ?_assertMatch(Expect, Error)};
++        ({EJson, Msg}) ->
++            try
++                couch_doc:from_json_obj(EJson),
++                {"Conversion failed to raise an exception", ?_assert(false)}
++            catch
++                _:_ -> {Msg, ?_assert(true)}
++            end
++    end, Cases).
++
++to_json_success_cases() ->
++    Cases = [
++        {
++            #doc{},
++            {[{<<"_id">>, <<"">>}]},
++            "Empty docs are {\"_id\": \"\"}"
++        },
++        {
++            #doc{id = <<"foo">>},
++            {[{<<"_id">>, <<"foo">>}]},
++            "_id is added."
++        },
++        {
++            #doc{revs = {5, ["foo"]}},
++            {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]},
++            "_rev is added."
++        },
++        {
++            [revs],
++            #doc{revs = {5, [<<"first">>, <<"second">>]}},
++            {[
++                 {<<"_id">>, <<>>},
++                 {<<"_rev">>, <<"5-first">>},
++                 {<<"_revisions">>, {[
++                     {<<"start">>, 5},
++                     {<<"ids">>, [<<"first">>, <<"second">>]}
++                 ]}}
++             ]},
++            "_revisions include with revs option"
++        },
++        {
++            #doc{body = {[{<<"foo">>, <<"bar">>}]}},
++            {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]},
++            "Arbitrary fields are added."
++        },
++        {
++            #doc{deleted = true, body = {[{<<"foo">>, <<"bar">>}]}},
++            {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]},
++            "Deleted docs no longer drop body members."
++        },
++        {
++            #doc{meta = [
++                {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
++            ]},
++            {[
++                 {<<"_id">>, <<>>},
++                 {<<"_revs_info">>, [
++                     {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
++                     {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
++                 ]}
++             ]},
++            "_revs_info field is added correctly."
++        },
++        {
++            #doc{meta = [{local_seq, 5}]},
++            {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]},
++            "_local_seq is added as an integer."
++        },
++        {
++            #doc{meta = [{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]},
++            {[
++                {<<"_id">>, <<>>},
++                {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]}
++            ]},
++            "_conflicts is added as an array of strings."
++        },
++        {
++            #doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
++            {[
++                 {<<"_id">>, <<>>},
++                 {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
++             ]},
++            "_deleted_conflicts is added as an array of strings."
++        },
++        {
++            #doc{atts = [
++                #att{
++                    name = <<"big.xml">>,
++                    type = <<"xml/sucks">>,
++                    data = fun() -> ok end,
++                    revpos = 1,
++                    att_len = 400,
++                    disk_len = 400
++                },
++                #att{
++                    name = <<"fast.json">>,
++                    type = <<"json/ftw">>,
++                    data = <<"{\"so\": \"there!\"}">>,
++                    revpos = 1,
++                    att_len = 16,
++                    disk_len = 16
++                }
++            ]},
++            {[
++                 {<<"_id">>, <<>>},
++                 {<<"_attachments">>, {[
++                       {<<"big.xml">>, {[
++                           {<<"content_type">>, <<"xml/sucks">>},
++                           {<<"revpos">>, 1},
++                           {<<"length">>, 400},
++                           {<<"stub">>, true}
++                       ]}},
++                       {<<"fast.json">>, {[
++                           {<<"content_type">>, <<"json/ftw">>},
++                           {<<"revpos">>, 1},
++                           {<<"length">>, 16},
++                           {<<"stub">>, true}
++                       ]}}
++                ]}}
++            ]},
++            "Attachments attached as stubs only include a length."
++        },
++        {
++            [attachments],
++            #doc{atts = [
++                #att{
++                    name = <<"stuff.txt">>,
++                    type = <<"text/plain">>,
++                    data = fun() -> <<"diet pepsi">> end,
++                    revpos = 1,
++                    att_len = 10,
++                    disk_len = 10
++                },
++                #att{
++                    name = <<"food.now">>,
++                    type = <<"application/food">>,
++                    revpos = 1,
++                    data = <<"sammich">>
++                }
++            ]},
++            {[
++                {<<"_id">>, <<>>},
++                {<<"_attachments">>, {[
++                   {<<"stuff.txt">>, {[
++                       {<<"content_type">>, <<"text/plain">>},
++                       {<<"revpos">>, 1},
++                       {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
++                   ]}},
++                   {<<"food.now">>, {[
++                       {<<"content_type">>, <<"application/food">>},
++                       {<<"revpos">>, 1},
++                       {<<"data">>, <<"c2FtbWljaA==">>}
++                   ]}}
++                ]}}
++            ]},
++            "Attachments included inline with attachments option."
++        }
++    ],
++
++    lists:map(fun
++        ({Doc, EJson, Msg}) ->
++            {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))};
++        ({Options, Doc, EJson, Msg}) ->
++            {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))}
++    end, Cases).
+diff --git a/test/couchdb/couch_file_tests.erl b/test/couchdb/couch_file_tests.erl
+new file mode 100644
+index 0000000..ad13383
+--- /dev/null
++++ b/test/couchdb/couch_file_tests.erl
+@@ -0,0 +1,265 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_file_tests).
++
++-include("couch_eunit.hrl").
++
++-define(BLOCK_SIZE, 4096).
++-define(setup(F), {setup, fun setup/0, fun teardown/1, F}).
++-define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}).
++
++
++setup() ->
++    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
++    Fd.
++
++teardown(Fd) ->
++    ok = couch_file:close(Fd).
++
++
++open_close_test_() ->
++    {
++        "Test for proper file open and close",
++        [
++            should_return_enoent_if_missed(),
++            should_ignore_invalid_flags_with_open(),
++            ?setup(fun should_return_pid_on_file_open/1),
++            should_close_file_properly(),
++            ?setup(fun should_create_empty_new_files/1)
++        ]
++    }.
++
++should_return_enoent_if_missed() ->
++    ?_assertEqual({error, enoent}, couch_file:open("not a real file")).
++
++should_ignore_invalid_flags_with_open() ->
++    ?_assertMatch({ok, _},
++                  couch_file:open(?tempfile(), [create, invalid_option])).
++
++should_return_pid_on_file_open(Fd) ->
++    ?_assert(is_pid(Fd)).
++
++should_close_file_properly() ->
++    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
++    ok = couch_file:close(Fd),
++    ?_assert(true).
++
++should_create_empty_new_files(Fd) ->
++    ?_assertMatch({ok, 0}, couch_file:bytes(Fd)).
++
++
++read_write_test_() ->
++    {
++        "Common file read/write tests",
++        ?foreach([
++            fun should_increase_file_size_on_write/1,
++            fun should_return_current_file_size_on_write/1,
++            fun should_write_and_read_term/1,
++            fun should_write_and_read_binary/1,
++            fun should_write_and_read_large_binary/1,
++            fun should_return_term_as_binary_for_reading_binary/1,
++            fun should_read_term_written_as_binary/1,
++            fun should_read_iolist/1,
++            fun should_fsync/1,
++            fun should_not_read_beyond_eof/1,
++            fun should_truncate/1
++        ])
++    }.
++
++
++should_increase_file_size_on_write(Fd) ->
++    {ok, 0, _} = couch_file:append_term(Fd, foo),
++    {ok, Size} = couch_file:bytes(Fd),
++    ?_assert(Size > 0).
++
++should_return_current_file_size_on_write(Fd) ->
++    {ok, 0, _} = couch_file:append_term(Fd, foo),
++    {ok, Size} = couch_file:bytes(Fd),
++    ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
++
++should_write_and_read_term(Fd) ->
++    {ok, Pos, _} = couch_file:append_term(Fd, foo),
++    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
++
++should_write_and_read_binary(Fd) ->
++    {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
++    ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
++
++should_return_term_as_binary_for_reading_binary(Fd) ->
++    {ok, Pos, _} = couch_file:append_term(Fd, foo),
++    Foo = couch_compress:compress(foo, snappy),
++    ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
++
++should_read_term_written_as_binary(Fd) ->
++    {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
++    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
++
++should_write_and_read_large_binary(Fd) ->
++    BigBin = list_to_binary(lists:duplicate(100000, 0)),
++    {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
++    ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
++
++should_read_iolist(Fd) ->
++    %% append_binary == append_iolist?
++    %% Possible bug in pread_iolist or iolist() -> append_binary
++    {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
++    {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
++    ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
++
++should_fsync(Fd) ->
++    {"How does one test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
++
++should_not_read_beyond_eof(_) ->
++    {"No idea how to test reading beyond EOF", ?_assert(true)}.
++
++should_truncate(Fd) ->
++    {ok, 0, _} = couch_file:append_term(Fd, foo),
++    {ok, Size} = couch_file:bytes(Fd),
++    BigBin = list_to_binary(lists:duplicate(100000, 0)),
++    {ok, _, _} = couch_file:append_binary(Fd, BigBin),
++    ok = couch_file:truncate(Fd, Size),
++    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
++
++
++header_test_() ->
++    {
++        "File header read/write tests",
++        [
++            ?foreach([
++                fun should_write_and_read_atom_header/1,
++                fun should_write_and_read_tuple_header/1,
++                fun should_write_and_read_second_header/1,
++                fun should_truncate_second_header/1,
++                fun should_produce_same_file_size_on_rewrite/1,
++                fun should_save_headers_larger_than_block_size/1
++            ]),
++            should_recover_header_marker_corruption(),
++            should_recover_header_size_corruption(),
++            should_recover_header_md5sig_corruption(),
++            should_recover_header_data_corruption()
++        ]
++    }.
++
++
++should_write_and_read_atom_header(Fd) ->
++    ok = couch_file:write_header(Fd, hello),
++    ?_assertMatch({ok, hello}, couch_file:read_header(Fd)).
++
++should_write_and_read_tuple_header(Fd) ->
++    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
++    ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
++
++should_write_and_read_second_header(Fd) ->
++    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
++    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
++    ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)).
++
++should_truncate_second_header(Fd) ->
++    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
++    {ok, Size} = couch_file:bytes(Fd),
++    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
++    ok = couch_file:truncate(Fd, Size),
++    ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
++
++should_produce_same_file_size_on_rewrite(Fd) ->
++    ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
++    {ok, Size1} = couch_file:bytes(Fd),
++    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
++    {ok, Size2} = couch_file:bytes(Fd),
++    ok = couch_file:truncate(Fd, Size1),
++    ok = couch_file:write_header(Fd, [foo, <<"more">>]),
++    ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)).
++
++should_save_headers_larger_than_block_size(Fd) ->
++    Header = erlang:make_tuple(5000, <<"CouchDB">>),
++    couch_file:write_header(Fd, Header),
++    {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}.
++
++
++should_recover_header_marker_corruption() ->
++    ?_assertMatch(
++        ok,
++        check_header_recovery(
++            fun(CouchFd, RawFd, Expect, HeaderPos) ->
++                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
++                file:pwrite(RawFd, HeaderPos, <<0>>),
++                ?assertMatch(Expect, couch_file:read_header(CouchFd))
++            end)
++    ).
++
++should_recover_header_size_corruption() ->
++    ?_assertMatch(
++        ok,
++        check_header_recovery(
++            fun(CouchFd, RawFd, Expect, HeaderPos) ->
++                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
++                % +1 for 0x1 byte marker
++                file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>),
++                ?assertMatch(Expect, couch_file:read_header(CouchFd))
++            end)
++    ).
++
++should_recover_header_md5sig_corruption() ->
++    ?_assertMatch(
++        ok,
++        check_header_recovery(
++            fun(CouchFd, RawFd, Expect, HeaderPos) ->
++                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
++                % +5 = +1 for 0x1 byte and +4 for term size.
++                file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>),
++                ?assertMatch(Expect, couch_file:read_header(CouchFd))
++            end)
++    ).
++
++should_recover_header_data_corruption() ->
++    ?_assertMatch(
++        ok,
++        check_header_recovery(
++            fun(CouchFd, RawFd, Expect, HeaderPos) ->
++                ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
++                % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
++                file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>),
++                ?assertMatch(Expect, couch_file:read_header(CouchFd))
++            end)
++    ).
++
++
++check_header_recovery(CheckFun) ->
++    Path = ?tempfile(),
++    {ok, Fd} = couch_file:open(Path, [create, overwrite]),
++    {ok, RawFd} = file:open(Path, [read, write, raw, binary]),
++
++    {ok, _} = write_random_data(Fd),
++    ExpectHeader = {some_atom, <<"a binary">>, 756},
++    ok = couch_file:write_header(Fd, ExpectHeader),
++
++    {ok, HeaderPos} = write_random_data(Fd),
++    ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
++
++    CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
++
++    ok = file:close(RawFd),
++    ok = couch_file:close(Fd),
++    ok.
++
++write_random_data(Fd) ->
++    write_random_data(Fd, 100 + random:uniform(1000)).
++
++write_random_data(Fd, 0) ->
++    {ok, Bytes} = couch_file:bytes(Fd),
++    {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
++write_random_data(Fd, N) ->
++    Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
++    Term = lists:nth(random:uniform(4) + 1, Choices),
++    {ok, _, _} = couch_file:append_term(Fd, Term),
++    write_random_data(Fd, N - 1).
+diff --git a/test/couchdb/couch_key_tree_tests.erl b/test/couchdb/couch_key_tree_tests.erl
+new file mode 100644
+index 0000000..753ecc4
+--- /dev/null
++++ b/test/couchdb/couch_key_tree_tests.erl
+@@ -0,0 +1,380 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_key_tree_tests).
++
++-include("couch_eunit.hrl").
++
++-define(DEPTH, 10).
++
++
++key_tree_merge_test_()->
++    {
++        "Key tree merge",
++        [
++            should_merge_with_empty_tree(),
++            should_merge_reflexive(),
++            should_merge_prefix_of_a_tree_with_tree(),
++            should_produce_conflict_on_merge_with_unrelated_branch(),
++            should_merge_reflexive_for_child_nodes(),
++            should_merge_tree_to_itself(),
++            should_merge_tree_of_odd_length(),
++            should_merge_tree_with_stem(),
++            should_merge_with_stem_at_deeper_level(),
++            should_merge_with_stem_at_deeper_level_with_deeper_paths(),
++            should_merge_single_tree_with_deeper_stem(),
++            should_merge_tree_with_large_stem(),
++            should_merge_stems(),
++            should_create_conflicts_on_merge(),
++            should_create_no_conflicts_on_merge(),
++            should_ignore_conflicting_branch()
++        ]
++    }.
++
++key_tree_missing_leaves_test_()->
++    {
++        "Missing tree leaves",
++        [
++            should_not_find_missing_leaves(),
++            should_find_missing_leaves()
++        ]
++    }.
++
++key_tree_remove_leaves_test_()->
++    {
++        "Remove tree leaves",
++        [
++            should_have_no_effect_on_removing_no_leaves(),
++            should_have_no_effect_on_removing_non_existant_branch(),
++            should_remove_leaf(),
++            should_produce_empty_tree_on_removing_all_leaves(),
++            should_have_no_effect_on_removing_non_existant_node(),
++            should_produce_empty_tree_on_removing_last_leaf()
++        ]
++    }.
++
++key_tree_get_leaves_test_()->
++    {
++        "Leaves retrieving",
++        [
++            should_extract_subtree(),
++            should_extract_subsubtree(),
++            should_gather_non_existant_leaf(),
++            should_gather_leaf(),
++            shoul_gather_multiple_leaves(),
++            should_retrieve_full_key_path(),
++            should_retrieve_full_key_path_for_node(),
++            should_retrieve_leaves_with_parent_node(),
++            should_retrieve_all_leaves()
++        ]
++    }.
++
++key_tree_leaf_counting_test_()->
++    {
++        "Leaf counting",
++        [
++            should_have_no_leaves_for_empty_tree(),
++            should_have_single_leaf_for_tree_with_single_node(),
++            should_have_two_leaves_for_tree_with_chindler_siblings(),
++            should_not_affect_on_leaf_counting_for_stemmed_tree()
++        ]
++    }.
++
++key_tree_stemming_test_()->
++    {
++        "Stemming",
++        [
++            should_have_no_effect_for_stemming_more_levels_than_exists(),
++            should_return_one_deepest_node(),
++            should_return_two_deepest_nodes()
++        ]
++    }.
++
++
++should_merge_with_empty_tree()->
++    One = {1, {"1","foo",[]}},
++    ?_assertEqual({[One], no_conflicts},
++                  couch_key_tree:merge([], One, ?DEPTH)).
++
++should_merge_reflexive()->
++    One = {1, {"1","foo",[]}},
++    ?_assertEqual({[One], no_conflicts},
++                  couch_key_tree:merge([One], One, ?DEPTH)).
++
++should_merge_prefix_of_a_tree_with_tree()->
++    One = {1, {"1","foo",[]}},
++    TwoSibs = [{1, {"1","foo",[]}},
++               {1, {"2","foo",[]}}],
++    ?_assertEqual({TwoSibs, no_conflicts},
++                  couch_key_tree:merge(TwoSibs, One, ?DEPTH)).
++
++should_produce_conflict_on_merge_with_unrelated_branch()->
++    TwoSibs = [{1, {"1","foo",[]}},
++               {1, {"2","foo",[]}}],
++    Three = {1, {"3","foo",[]}},
++    ThreeSibs = [{1, {"1","foo",[]}},
++                 {1, {"2","foo",[]}},
++                 {1, {"3","foo",[]}}],
++    ?_assertEqual({ThreeSibs, conflicts},
++                  couch_key_tree:merge(TwoSibs, Three, ?DEPTH)).
++
++should_merge_reflexive_for_child_nodes()->
++    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
++    ?_assertEqual({[TwoChild], no_conflicts},
++                  couch_key_tree:merge([TwoChild], TwoChild, ?DEPTH)).
++
++should_merge_tree_to_itself()->
++    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
++                                    {"1b", "bar", []}]}},
++    ?_assertEqual({[TwoChildSibs], no_conflicts},
++                  couch_key_tree:merge([TwoChildSibs], TwoChildSibs, ?DEPTH)).
++
++should_merge_tree_of_odd_length()->
++    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
++    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
++                                    {"1b", "bar", []}]}},
++    TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
++                                        {"1b", "bar", []}]}},
++
++    ?_assertEqual({[TwoChildPlusSibs], no_conflicts},
++                  couch_key_tree:merge([TwoChild], TwoChildSibs, ?DEPTH)).
++
++should_merge_tree_with_stem()->
++    Stemmed = {2, {"1a", "bar", []}},
++    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
++                                    {"1b", "bar", []}]}},
++
++    ?_assertEqual({[TwoChildSibs], no_conflicts},
++                  couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
++
++should_merge_with_stem_at_deeper_level()->
++    Stemmed = {3, {"1bb", "boo", []}},
++    TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
++                                    {"1b", "bar", [{"1bb", "boo", []}]}]}},
++    ?_assertEqual({[TwoChildSibs], no_conflicts},
++                  couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
++
++should_merge_with_stem_at_deeper_level_with_deeper_paths()->
++    Stemmed = {3, {"1bb", "boo", []}},
++    StemmedTwoChildSibs = [{2,{"1a", "bar", []}},
++                           {2,{"1b", "bar", [{"1bb", "boo", []}]}}],
++    ?_assertEqual({StemmedTwoChildSibs, no_conflicts},
++                  couch_key_tree:merge(StemmedTwoChildSibs, Stemmed, ?DEPTH)).
++
++should_merge_single_tree_with_deeper_stem()->
++    Stemmed = {3, {"1aa", "bar", []}},
++    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
++    ?_assertEqual({[TwoChild], no_conflicts},
++                  couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
++
++should_merge_tree_with_large_stem()->
++    Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
++    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
++    ?_assertEqual({[TwoChild], no_conflicts},
++                  couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
++
++should_merge_stems()->
++    StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
++    StemmedB = {3, {"1aa", "bar", []}},
++    ?_assertEqual({[StemmedA], no_conflicts},
++                  couch_key_tree:merge([StemmedA], StemmedB, ?DEPTH)).
++
++should_create_conflicts_on_merge()->
++    OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
++    Stemmed = {3, {"1aa", "bar", []}},
++    ?_assertEqual({[OneChild, Stemmed], conflicts},
++                  couch_key_tree:merge([OneChild], Stemmed, ?DEPTH)).
++
++should_create_no_conflicts_on_merge()->
++    OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
++    Stemmed = {3, {"1aa", "bar", []}},
++    TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
++    ?_assertEqual({[TwoChild], no_conflicts},
++                  couch_key_tree:merge([OneChild, Stemmed], TwoChild, ?DEPTH)).
++
++should_ignore_conflicting_branch()->
++    %% this test is based on couch-902-test-case2.py
++    %% foo has conflicts from replication at depth two
++    %% foo3 is the current value
++    Foo = {1, {"foo",
++               "val1",
++               [{"foo2","val2",[]},
++                {"foo3", "val3", []}
++               ]}},
++    %% foo now has an attachment added, which leads to foo4 and val4
++    %% off foo3
++    Bar = {1, {"foo",
++               [],
++               [{"foo3",
++                 [],
++                 [{"foo4","val4",[]}
++                  ]}]}},
++    %% this is what the merge returns
++    %% note that it ignores the conflicting branch as there's no match
++    FooBar = {1, {"foo",
++               "val1",
++               [{"foo2","val2",[]},
++                {"foo3", "val3", [{"foo4","val4",[]}]}
++               ]}},
++    {
++        "COUCHDB-902",
++        ?_assertEqual({[FooBar], no_conflicts},
++                      couch_key_tree:merge([Foo], Bar, ?DEPTH))
++    }.
++
++should_not_find_missing_leaves()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual([],
++                  couch_key_tree:find_missing(TwoChildSibs,
++                                              [{0,"1"}, {1,"1a"}])).
++
++should_find_missing_leaves()->
++    Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
++    Stemmed2 = [{2, {"1aa", "bar", []}}],
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    [
++        ?_assertEqual(
++            [{0, "10"}, {100, "x"}],
++            couch_key_tree:find_missing(
++                TwoChildSibs,
++                [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])),
++        ?_assertEqual(
++            [{0, "1"}, {100, "x"}],
++            couch_key_tree:find_missing(
++                Stemmed1,
++                [{0,"1"}, {1,"1a"}, {100, "x"}])),
++        ?_assertEqual(
++            [{0, "1"}, {1,"1a"}, {100, "x"}],
++            couch_key_tree:find_missing(
++                Stemmed2,
++                [{0,"1"}, {1,"1a"}, {100, "x"}]))
++    ].
++
++should_have_no_effect_on_removing_no_leaves()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({TwoChildSibs, []},
++                  couch_key_tree:remove_leafs(TwoChildSibs,
++                                              [])).
++
++should_have_no_effect_on_removing_non_existant_branch()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({TwoChildSibs, []},
++                  couch_key_tree:remove_leafs(TwoChildSibs,
++                                              [{0, "1"}])).
++
++should_remove_leaf()->
++    OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({OneChild, [{1, "1b"}]},
++                  couch_key_tree:remove_leafs(TwoChildSibs,
++                                              [{1, "1b"}])).
++
++should_produce_empty_tree_on_removing_all_leaves()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]},
++                  couch_key_tree:remove_leafs(TwoChildSibs,
++                                              [{1, "1b"}, {1, "1a"}])).
++
++should_have_no_effect_on_removing_non_existant_node()->
++    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
++    ?_assertEqual({Stemmed, []},
++                  couch_key_tree:remove_leafs(Stemmed,
++                                              [{1, "1a"}])).
++
++should_produce_empty_tree_on_removing_last_leaf()->
++    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
++    ?_assertEqual({[], [{2, "1aa"}]},
++                  couch_key_tree:remove_leafs(Stemmed,
++                                              [{2, "1aa"}])).
++
++should_extract_subtree()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[{"foo", {0, ["1"]}}],[]},
++                  couch_key_tree:get(TwoChildSibs, [{0, "1"}])).
++
++should_extract_subsubtree()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
++                  couch_key_tree:get(TwoChildSibs, [{1, "1a"}])).
++
++should_gather_non_existant_leaf()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[],[{0, "x"}]},
++                  couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])).
++
++should_gather_leaf()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]},
++                  couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])).
++
++shoul_gather_multiple_leaves()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
++                  couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])).
++
++should_retrieve_full_key_path()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[{0,[{"1", "foo"}]}],[]},
++                  couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])).
++
++should_retrieve_full_key_path_for_node()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
++                  couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])).
++
++should_retrieve_leaves_with_parent_node()->
++    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    [
++        ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}],
++                      couch_key_tree:get_all_leafs_full(Stemmed)),
++        ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]},
++                       {1, [{"1b", "bar"},{"1", "foo"}]}],
++                      couch_key_tree:get_all_leafs_full(TwoChildSibs))
++    ].
++
++should_retrieve_all_leaves()->
++    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    [
++        ?_assertEqual([{"bar", {2, ["1aa","1a"]}}],
++                      couch_key_tree:get_all_leafs(Stemmed)),
++        ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
++                      couch_key_tree:get_all_leafs(TwoChildSibs))
++    ].
++
++should_have_no_leaves_for_empty_tree()->
++    ?_assertEqual(0, couch_key_tree:count_leafs([])).
++
++should_have_single_leaf_for_tree_with_single_node()->
++    ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])).
++
++should_have_two_leaves_for_tree_with_chindler_siblings()->
++    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
++    ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)).
++
++should_not_affect_on_leaf_counting_for_stemmed_tree()->
++    ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])).
++
++should_have_no_effect_for_stemming_more_levels_than_exists()->
++    TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
++    ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)).
++
++should_return_one_deepest_node()->
++    TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
++    Stemmed = [{2, {"1aa", "bar", []}}],
++    ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)).
++
++should_return_two_deepest_nodes()->
++    TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
++    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
++    ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
+diff --git a/test/couchdb/couch_passwords_tests.erl b/test/couchdb/couch_passwords_tests.erl
+new file mode 100644
+index 0000000..116265c
+--- /dev/null
++++ b/test/couchdb/couch_passwords_tests.erl
+@@ -0,0 +1,54 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_passwords_tests).
++
++-include("couch_eunit.hrl").
++
++
++pbkdf2_test_()->
++    {"PBKDF2",
++     [
++         {"Iterations: 1, length: 20",
++          ?_assertEqual(
++              {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
++              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))},
++
++         {"Iterations: 2, length: 20",
++          ?_assertEqual(
++              {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
++              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))},
++
++         {"Iterations: 4096, length: 20",
++          ?_assertEqual(
++              {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
++              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))},
++
++         {"Iterations: 4096, length: 25",
++          ?_assertEqual(
++              {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
++              couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
++                                     <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
++                                     4096, 25))},
++         {"Null byte",
++          ?_assertEqual(
++              {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
++              couch_passwords:pbkdf2(<<"pass\0word">>,
++                                     <<"sa\0lt">>,
++                                     4096, 16))},
++
++         {timeout, 180,  %% this may run too long on slow hosts
++          {"Iterations: 16777216 - this may take some time",
++           ?_assertEqual(
++               {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
++               couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
++           )}}]}.
+diff --git a/test/couchdb/couch_ref_counter_tests.erl b/test/couchdb/couch_ref_counter_tests.erl
+new file mode 100644
+index 0000000..b7e97b4
+--- /dev/null
++++ b/test/couchdb/couch_ref_counter_tests.erl
+@@ -0,0 +1,107 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_ref_counter_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(TIMEOUT, 1000).
++
++
++setup() ->
++    {ok, RefCtr} = couch_ref_counter:start([]),
++    ChildPid = spawn(fun() -> loop() end),
++    {RefCtr, ChildPid}.
++
++teardown({_, ChildPid}) ->
++    erlang:monitor(process, ChildPid),
++    ChildPid ! close,
++    wait().
++
++
++couch_ref_counter_test_() ->
++    {
++        "CouchDB reference counter tests",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_initialize_with_calling_process_as_referrer/1,
++                fun should_ignore_unknown_pid/1,
++                fun should_increment_counter_on_pid_add/1,
++                fun should_not_increase_counter_on_readding_same_pid/1,
++                fun should_drop_ref_for_double_added_pid/1,
++                fun should_decrement_counter_on_pid_drop/1,
++                fun should_add_after_drop/1,
++                fun should_decrement_counter_on_process_exit/1
++
++            ]
++        }
++    }.
++
++
++should_initialize_with_calling_process_as_referrer({RefCtr, _}) ->
++    ?_assertEqual(1, couch_ref_counter:count(RefCtr)).
++
++should_ignore_unknown_pid({RefCtr, ChildPid}) ->
++    ?_assertEqual(ok, couch_ref_counter:drop(RefCtr, ChildPid)).
++
++should_increment_counter_on_pid_add({RefCtr, ChildPid}) ->
++    couch_ref_counter:add(RefCtr, ChildPid),
++    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
++
++should_not_increase_counter_on_readding_same_pid({RefCtr, ChildPid}) ->
++    couch_ref_counter:add(RefCtr, ChildPid),
++    couch_ref_counter:add(RefCtr, ChildPid),
++    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
++
++should_drop_ref_for_double_added_pid({RefCtr, ChildPid}) ->
++    couch_ref_counter:add(RefCtr, ChildPid),
++    couch_ref_counter:add(RefCtr, ChildPid),
++    couch_ref_counter:drop(RefCtr, ChildPid),
++    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
++
++should_decrement_counter_on_pid_drop({RefCtr, ChildPid}) ->
++    couch_ref_counter:add(RefCtr, ChildPid),
++    couch_ref_counter:drop(RefCtr, ChildPid),
++    ?_assertEqual(1, couch_ref_counter:count(RefCtr)).
++
++should_add_after_drop({RefCtr, ChildPid}) ->
++    couch_ref_counter:add(RefCtr, ChildPid),
++    couch_ref_counter:drop(RefCtr, ChildPid),
++    couch_ref_counter:add(RefCtr, ChildPid),
++    ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
++
++should_decrement_counter_on_process_exit({RefCtr, ChildPid}) ->
++    ?_assertEqual(1,
++        begin
++            couch_ref_counter:add(RefCtr, ChildPid),
++            erlang:monitor(process, ChildPid),
++            ChildPid ! close,
++            wait(),
++            couch_ref_counter:count(RefCtr)
++        end).
++
++
++loop() ->
++    receive
++        close -> ok
++    end.
++
++wait() ->
++    receive
++        {'DOWN', _, _, _, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw(timeout_error)
++    end.
+diff --git a/test/couchdb/couch_stats_tests.erl b/test/couchdb/couch_stats_tests.erl
+new file mode 100644
+index 0000000..d156449
+--- /dev/null
++++ b/test/couchdb/couch_stats_tests.erl
+@@ -0,0 +1,412 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_stats_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(STATS_CFG_FIXTURE,
++    filename:join([?FIXTURESDIR, "couch_stats_aggregates.cfg"])).
++-define(STATS_INI_FIXTURE,
++    filename:join([?FIXTURESDIR, "couch_stats_aggregates.ini"])).
++-define(TIMEOUT, 1000).
++-define(TIMEWAIT, 500).
++
++
++setup_collector() ->
++    couch_stats_collector:start(),
++    ok.
++
++setup_aggregator(_) ->
++    {ok, Pid} = couch_config:start_link([?STATS_INI_FIXTURE]),
++    {ok, _} = couch_stats_collector:start(),
++    {ok, _} = couch_stats_aggregator:start(?STATS_CFG_FIXTURE),
++    Pid.
++
++teardown_collector(_) ->
++    couch_stats_collector:stop(),
++    ok.
++
++teardown_aggregator(_, Pid) ->
++    couch_stats_aggregator:stop(),
++    couch_stats_collector:stop(),
++    erlang:monitor(process, Pid),
++    couch_config:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, config_stop})
++    end,
++    ok.
++
++
++couch_stats_collector_test_() ->
++    {
++        "CouchDB stats collector tests",
++        {
++            foreach,
++            fun setup_collector/0, fun teardown_collector/1,
++            [
++                should_increment_counter(),
++                should_decrement_counter(),
++                should_increment_and_decrement_counter(),
++                should_record_absolute_values(),
++                should_clear_absolute_values(),
++                should_track_process_count(),
++                should_increment_counter_multiple_times_per_pid(),
++                should_decrement_counter_on_process_exit(),
++                should_decrement_for_each_track_process_count_call_on_exit(),
++                should_return_all_counters_and_absolute_values(),
++                should_return_incremental_counters(),
++                should_return_absolute_values()
++            ]
++        }
++    }.
++
++couch_stats_aggregator_test_() ->
++    Funs = [
++        fun should_init_empty_aggregate/2,
++        fun should_get_empty_aggregate/2,
++        fun should_change_stats_on_values_add/2,
++        fun should_change_stats_for_all_times_on_values_add/2,
++        fun should_change_stats_on_values_change/2,
++        fun should_change_stats_for_all_times_on_values_change/2,
++        fun should_not_remove_data_after_some_time_for_0_sample/2,
++        fun should_remove_data_after_some_time_for_other_samples/2
++    ],
++    {
++        "CouchDB stats aggregator tests",
++        [
++            {
++                "Absolute values",
++                {
++                    foreachx,
++                    fun setup_aggregator/1, fun teardown_aggregator/2,
++                    [{absolute, Fun} || Fun <- Funs]
++                }
++            },
++            {
++                "Counters",
++                {
++                    foreachx,
++                    fun setup_aggregator/1, fun teardown_aggregator/2,
++                    [{counter, Fun} || Fun <- Funs]
++                }
++            }
++        ]
++    }.
++
++
++should_increment_counter() ->
++    ?_assertEqual(100,
++        begin
++            AddCount = fun() -> couch_stats_collector:increment(foo) end,
++            repeat(AddCount, 100),
++            couch_stats_collector:get(foo)
++        end).
++
++should_decrement_counter() ->
++    ?_assertEqual(67,
++        begin
++            AddCount = fun() -> couch_stats_collector:increment(foo) end,
++            RemCount = fun() -> couch_stats_collector:decrement(foo) end,
++            repeat(AddCount, 100),
++            repeat(RemCount, 33),
++            couch_stats_collector:get(foo)
++        end).
++
++should_increment_and_decrement_counter() ->
++    ?_assertEqual(0,
++        begin
++            AddCount = fun() -> couch_stats_collector:increment(foo) end,
++            RemCount = fun() -> couch_stats_collector:decrement(foo) end,
++            repeat(AddCount, 100),
++            repeat(RemCount, 25),
++            repeat(AddCount, 10),
++            repeat(RemCount, 5),
++            repeat(RemCount, 80),
++            couch_stats_collector:get(foo)
++        end).
++
++should_record_absolute_values() ->
++    ?_assertEqual(lists:seq(1, 15),
++        begin
++            lists:map(fun(Val) ->
++                couch_stats_collector:record(bar, Val)
++            end, lists:seq(1, 15)),
++            couch_stats_collector:get(bar)
++        end).
++
++should_clear_absolute_values() ->
++    ?_assertEqual(nil,
++        begin
++            lists:map(fun(Val) ->
++                couch_stats_collector:record(bar, Val)
++            end, lists:seq(1, 15)),
++            couch_stats_collector:clear(bar),
++            couch_stats_collector:get(bar)
++        end).
++
++should_track_process_count() ->
++    ?_assertMatch({_, 1}, spawn_and_count(1)).
++
++should_increment_counter_multiple_times_per_pid() ->
++    ?_assertMatch({_, 3}, spawn_and_count(3)).
++
++should_decrement_counter_on_process_exit() ->
++    ?_assertEqual(2,
++        begin
++            {Pid, 1} = spawn_and_count(1),
++            spawn_and_count(2),
++            RefMon = erlang:monitor(process, Pid),
++            Pid ! sepuku,
++            receive
++                {'DOWN', RefMon, _, _, _} -> ok
++            after ?TIMEOUT ->
++                throw(timeout)
++            end,
++            % Sleep for a while to let the collector handle the updates;
++            % it cannot notice the process death instantly.
++            timer:sleep(?TIMEWAIT),
++            couch_stats_collector:get(hoopla)
++        end).
++
++should_decrement_for_each_track_process_count_call_on_exit() ->
++    ?_assertEqual(2,
++        begin
++            {_, 2} = spawn_and_count(2),
++            {Pid, 6} = spawn_and_count(4),
++            RefMon = erlang:monitor(process, Pid),
++            Pid ! sepuku,
++            receive
++                {'DOWN', RefMon, _, _, _} -> ok
++            after ?TIMEOUT ->
++                throw(timeout)
++            end,
++            timer:sleep(?TIMEWAIT),
++            couch_stats_collector:get(hoopla)
++        end).
++
++should_return_all_counters_and_absolute_values() ->
++    ?_assertEqual([{bar,[1.0,0.0]}, {foo,1}],
++        begin
++            couch_stats_collector:record(bar, 0.0),
++            couch_stats_collector:record(bar, 1.0),
++            couch_stats_collector:increment(foo),
++            lists:sort(couch_stats_collector:all())
++        end).
++
++should_return_incremental_counters() ->
++    ?_assertEqual([{foo,1}],
++        begin
++            couch_stats_collector:record(bar, 0.0),
++            couch_stats_collector:record(bar, 1.0),
++            couch_stats_collector:increment(foo),
++            lists:sort(couch_stats_collector:all(incremental))
++        end).
++
++should_return_absolute_values() ->
++    ?_assertEqual([{bar,[1.0,0.0]}, {zing, "Z"}],
++        begin
++            couch_stats_collector:record(bar, 0.0),
++            couch_stats_collector:record(bar, 1.0),
++            couch_stats_collector:record(zing, 90),
++            couch_stats_collector:increment(foo),
++            lists:sort(couch_stats_collector:all(absolute))
++        end).
++
++should_init_empty_aggregate(absolute, _) ->
++    {Aggs} = couch_stats_aggregator:all(),
++    ?_assertEqual({[{'11', make_agg(<<"randomosity">>,
++                                    null, null, null, null, null)}]},
++                  couch_util:get_value(number, Aggs));
++should_init_empty_aggregate(counter, _) ->
++    {Aggs} = couch_stats_aggregator:all(),
++    ?_assertEqual({[{stuff, make_agg(<<"yay description">>,
++                                     null, null, null, null, null)}]},
++                  couch_util:get_value(testing, Aggs)).
++
++should_get_empty_aggregate(absolute, _) ->
++    ?_assertEqual(make_agg(<<"randomosity">>, null, null, null, null, null),
++             couch_stats_aggregator:get_json({number, '11'}));
++should_get_empty_aggregate(counter, _) ->
++    ?_assertEqual(make_agg(<<"yay description">>, null, null, null, null, null),
++             couch_stats_aggregator:get_json({testing, stuff})).
++
++should_change_stats_on_values_add(absolute, _) ->
++    lists:foreach(fun(X) ->
++        couch_stats_collector:record({number, 11}, X)
++    end, lists:seq(0, 10)),
++    couch_stats_aggregator:collect_sample(),
++    ?_assertEqual(make_agg(<<"randomosity">>, 5.0, 5.0, null, 5.0, 5.0),
++                  couch_stats_aggregator:get_json({number, 11}));
++should_change_stats_on_values_add(counter, _) ->
++    lists:foreach(fun(_) ->
++        couch_stats_collector:increment({testing, stuff})
++    end, lists:seq(1, 100)),
++    couch_stats_aggregator:collect_sample(),
++    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 100.0, null, 100, 100),
++                  couch_stats_aggregator:get_json({testing, stuff})).
++
++should_change_stats_for_all_times_on_values_add(absolute, _) ->
++    lists:foreach(fun(X) ->
++        couch_stats_collector:record({number, 11}, X)
++    end, lists:seq(0, 10)),
++    couch_stats_aggregator:collect_sample(),
++    ?_assertEqual(make_agg(<<"randomosity">>, 5.0, 5.0, null, 5.0, 5.0),
++                  couch_stats_aggregator:get_json({number, 11}, 1));
++should_change_stats_for_all_times_on_values_add(counter, _) ->
++    lists:foreach(fun(_) ->
++        couch_stats_collector:increment({testing, stuff})
++    end, lists:seq(1, 100)),
++    couch_stats_aggregator:collect_sample(),
++    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 100.0, null, 100, 100),
++                  couch_stats_aggregator:get_json({testing, stuff}, 1)).
++
++should_change_stats_on_values_change(absolute, _) ->
++    ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
++        begin
++            lists:foreach(fun(X) ->
++                couch_stats_collector:record({number, 11}, X)
++            end, lists:seq(0, 10)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_collector:record({number, 11}, 15),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({number, 11})
++        end);
++should_change_stats_on_values_change(counter, _) ->
++    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 50.0, 70.711, 0, 100),
++        begin
++            lists:foreach(fun(_) ->
++                couch_stats_collector:increment({testing, stuff})
++            end, lists:seq(1, 100)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({testing, stuff})
++        end).
++
++should_change_stats_for_all_times_on_values_change(absolute, _) ->
++    ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
++        begin
++            lists:foreach(fun(X) ->
++                couch_stats_collector:record({number, 11}, X)
++            end, lists:seq(0, 10)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_collector:record({number, 11}, 15),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({number, 11}, 1)
++        end);
++should_change_stats_for_all_times_on_values_change(counter, _) ->
++    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 50.0, 70.711, 0, 100),
++        begin
++            lists:foreach(fun(_) ->
++                couch_stats_collector:increment({testing, stuff})
++            end, lists:seq(1, 100)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({testing, stuff}, 1)
++        end).
++
++should_not_remove_data_after_some_time_for_0_sample(absolute, _) ->
++    ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
++        begin
++            lists:foreach(fun(X) ->
++                couch_stats_collector:record({number, 11}, X)
++            end, lists:seq(0, 10)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_collector:record({number, 11}, 15),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({number, 11})
++        end);
++should_not_remove_data_after_some_time_for_0_sample(counter, _) ->
++    ?_assertEqual(make_agg(<<"yay description">>, 100.0, 33.333, 57.735, 0, 100),
++        begin
++            lists:foreach(fun(_) ->
++                couch_stats_collector:increment({testing, stuff})
++            end, lists:seq(1, 100)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({testing, stuff})
++        end).
++
++should_remove_data_after_some_time_for_other_samples(absolute, _) ->
++    ?_assertEqual(make_agg(<<"randomosity">>, 15.0, 15.0, null, 15.0, 15.0),
++        begin
++            lists:foreach(fun(X) ->
++                couch_stats_collector:record({number, 11}, X)
++            end, lists:seq(0, 10)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_collector:record({number, 11}, 15),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({number, 11}, 1)
++        end);
++should_remove_data_after_some_time_for_other_samples(counter, _) ->
++    ?_assertEqual(make_agg(<<"yay description">>, 0, 0.0, 0.0, 0, 0),
++        begin
++            lists:foreach(fun(_) ->
++                couch_stats_collector:increment({testing, stuff})
++            end, lists:seq(1, 100)),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            timer:sleep(?TIMEWAIT),
++            couch_stats_aggregator:collect_sample(),
++            couch_stats_aggregator:get_json({testing, stuff}, 1)
++        end).
++
++
++spawn_and_count(N) ->
++    Self = self(),
++    Pid = spawn(fun() ->
++        lists:foreach(
++            fun(_) ->
++                couch_stats_collector:track_process_count(hoopla)
++            end, lists:seq(1,N)),
++        Self ! reporting,
++        receive
++            sepuku -> ok
++        end
++    end),
++    receive reporting -> ok end,
++    {Pid, couch_stats_collector:get(hoopla)}.
++
++repeat(_, 0) ->
++    ok;
++repeat(Fun, Count) ->
++    Fun(),
++    repeat(Fun, Count-1).
++
++make_agg(Desc, Sum, Mean, StdDev, Min, Max) ->
++    {[
++        {description, Desc},
++        {current, Sum},
++        {sum, Sum},
++        {mean, Mean},
++        {stddev, StdDev},
++        {min, Min},
++        {max, Max}
++    ]}.
+diff --git a/test/couchdb/couch_stream_tests.erl b/test/couchdb/couch_stream_tests.erl
+new file mode 100644
+index 0000000..335a2fe
+--- /dev/null
++++ b/test/couchdb/couch_stream_tests.erl
+@@ -0,0 +1,100 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_stream_tests).
++
++-include("couch_eunit.hrl").
++
++
++setup() ->
++    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
++    {ok, Stream} = couch_stream:open(Fd),
++    {Fd, Stream}.
++
++teardown({Fd, _}) ->
++    ok = couch_file:close(Fd).
++
++
++stream_test_() ->
++    {
++        "CouchDB stream tests",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_write/1,
++                fun should_write_consecutive/1,
++                fun should_write_empty_binary/1,
++                fun should_return_file_pointers_on_close/1,
++                fun should_return_stream_size_on_close/1,
++                fun should_return_valid_pointers/1,
++                fun should_recall_last_pointer_position/1,
++                fun should_stream_more_with_4K_chunk_size/1
++            ]
++        }
++    }.
++
++
++should_write({_, Stream}) ->
++    ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)).
++
++should_write_consecutive({_, Stream}) ->
++    couch_stream:write(Stream, <<"food">>),
++    ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)).
++
++should_write_empty_binary({_, Stream}) ->
++    ?_assertEqual(ok, couch_stream:write(Stream, <<>>)).
++
++should_return_file_pointers_on_close({_, Stream}) ->
++    couch_stream:write(Stream, <<"foodfoob">>),
++    {Ptrs, _, _, _, _} = couch_stream:close(Stream),
++    ?_assertEqual([{0, 8}], Ptrs).
++
++should_return_stream_size_on_close({_, Stream}) ->
++    couch_stream:write(Stream, <<"foodfoob">>),
++    {_, Length, _, _, _} = couch_stream:close(Stream),
++    ?_assertEqual(8, Length).
++
++should_return_valid_pointers({Fd, Stream}) ->
++    couch_stream:write(Stream, <<"foodfoob">>),
++    {Ptrs, _, _, _, _} = couch_stream:close(Stream),
++    ?_assertEqual(<<"foodfoob">>, read_all(Fd, Ptrs)).
++
++should_recall_last_pointer_position({Fd, Stream}) ->
++    couch_stream:write(Stream, <<"foodfoob">>),
++    {_, _, _, _, _} = couch_stream:close(Stream),
++    {ok, ExpPtr} = couch_file:bytes(Fd),
++    {ok, Stream2} = couch_stream:open(Fd),
++    ZeroBits = <<0:(8 * 10)>>,
++    OneBits = <<1:(8 * 10)>>,
++    ok = couch_stream:write(Stream2, OneBits),
++    ok = couch_stream:write(Stream2, ZeroBits),
++    {Ptrs, 20, _, _, _} = couch_stream:close(Stream2),
++    [{ExpPtr, 20}] = Ptrs,
++    AllBits = iolist_to_binary([OneBits, ZeroBits]),
++    ?_assertEqual(AllBits, read_all(Fd, Ptrs)).
++
++should_stream_more_with_4K_chunk_size({Fd, _}) ->
++    {ok, Stream} = couch_stream:open(Fd, [{buffer_size, 4096}]),
++    lists:foldl(
++        fun(_, Acc) ->
++            Data = <<"a1b2c">>,
++            couch_stream:write(Stream, Data),
++            [Data | Acc]
++        end, [], lists:seq(1, 1024)),
++    ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120, _, _, _},
++                  couch_stream:close(Stream)).
++
++
++read_all(Fd, PosList) ->
++    Data = couch_stream:foldl(Fd, PosList, fun(Bin, Acc) -> [Bin, Acc] end, []),
++    iolist_to_binary(Data).
+diff --git a/test/couchdb/couch_task_status_tests.erl b/test/couchdb/couch_task_status_tests.erl
+new file mode 100644
+index 0000000..f71ad2b
+--- /dev/null
++++ b/test/couchdb/couch_task_status_tests.erl
+@@ -0,0 +1,225 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_task_status_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(TIMEOUT, 1000).
++
++
++setup() ->
++    {ok, TaskStatusPid} = couch_task_status:start_link(),
++    TaskUpdaterPid = spawn(fun() -> loop() end),
++    {TaskStatusPid, TaskUpdaterPid}.
++
++teardown({TaskStatusPid, _}) ->
++    erlang:monitor(process, TaskStatusPid),
++    couch_task_status:stop(),
++    receive
++        {'DOWN', _, _, TaskStatusPid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw(timeout_error)
++    end.
++
++
++couch_task_status_test_() ->
++    {
++        "CouchDB task status updates",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_register_task/1,
++                fun should_set_task_startup_time/1,
++                fun should_have_update_time_as_startup_before_any_progress/1,
++                fun should_set_task_type/1,
++                fun should_not_register_multiple_tasks_for_same_pid/1,
++                fun should_set_task_progress/1,
++                fun should_update_task_progress/1,
++                fun should_update_time_changes_on_task_progress/1,
++                fun should_control_update_frequency/1,
++                fun should_reset_control_update_frequency/1,
++                fun should_track_multiple_tasks/1,
++                fun should_finish_task/1
++
++            ]
++        }
++    }.
++
++
++should_register_task({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    ?_assertEqual(1, length(couch_task_status:all())).
++
++should_set_task_startup_time({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    ?_assert(is_integer(get_task_prop(Pid, started_on))).
++
++should_have_update_time_as_startup_before_any_progress({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    StartTime = get_task_prop(Pid, started_on),
++    ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)).
++
++should_set_task_type({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    ?_assertEqual(replication, get_task_prop(Pid, type)).
++
++should_not_register_multiple_tasks_for_same_pid({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    ?_assertEqual({add_task_error, already_registered},
++                  call(Pid, add, [{type, compaction}, {progress, 0}])).
++
++should_set_task_progress({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    ?_assertEqual(0, get_task_prop(Pid, progress)).
++
++should_update_task_progress({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    call(Pid, update, [{progress, 25}]),
++    ?_assertEqual(25, get_task_prop(Pid, progress)).
++
++should_update_time_changes_on_task_progress({_, Pid}) ->
++    ?_assert(
++        begin
++            ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++            ok = timer:sleep(1000),  % sleep so updated_on differs from started_on
++            call(Pid, update, [{progress, 25}]),
++            get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on)
++        end).
++
++should_control_update_frequency({_, Pid}) ->
++    ?_assertEqual(66,
++        begin
++            ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++            call(Pid, update, [{progress, 50}]),
++            call(Pid, update_frequency, 500),
++            call(Pid, update, [{progress, 66}]),
++            call(Pid, update, [{progress, 77}]),
++            get_task_prop(Pid, progress)
++        end).
++
++should_reset_control_update_frequency({_, Pid}) ->
++    ?_assertEqual(87,
++        begin
++            ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++            call(Pid, update, [{progress, 50}]),
++            call(Pid, update_frequency, 500),
++            call(Pid, update, [{progress, 66}]),
++            call(Pid, update, [{progress, 77}]),
++            call(Pid, update_frequency, 0),
++            call(Pid, update, [{progress, 87}]),
++            get_task_prop(Pid, progress)
++        end).
++
++should_track_multiple_tasks(_) ->
++    ?_assert(run_multiple_tasks()).
++
++should_finish_task({_, Pid}) ->
++    ok = call(Pid, add, [{type, replication}, {progress, 0}]),
++    ?assertEqual(1, length(couch_task_status:all())),
++    ok = call(Pid, done),
++    ?_assertEqual(0, length(couch_task_status:all())).
++
++
++run_multiple_tasks() ->
++    Pid1 = spawn(fun() -> loop() end),
++    Pid2 = spawn(fun() -> loop() end),
++    Pid3 = spawn(fun() -> loop() end),
++    call(Pid1, add, [{type, replication}, {progress, 0}]),
++    call(Pid2, add, [{type, compaction}, {progress, 0}]),
++    call(Pid3, add, [{type, indexer}, {progress, 0}]),
++
++    ?assertEqual(3, length(couch_task_status:all())),
++    ?assertEqual(replication, get_task_prop(Pid1, type)),
++    ?assertEqual(compaction, get_task_prop(Pid2, type)),
++    ?assertEqual(indexer, get_task_prop(Pid3, type)),
++
++    call(Pid2, update, [{progress, 33}]),
++    call(Pid3, update, [{progress, 42}]),
++    call(Pid1, update, [{progress, 11}]),
++    ?assertEqual(42, get_task_prop(Pid3, progress)),
++    call(Pid1, update, [{progress, 72}]),
++    ?assertEqual(72, get_task_prop(Pid1, progress)),
++    ?assertEqual(33, get_task_prop(Pid2, progress)),
++
++    call(Pid1, done),
++    ?assertEqual(2, length(couch_task_status:all())),
++    call(Pid3, done),
++    ?assertEqual(1, length(couch_task_status:all())),
++    call(Pid2, done),
++    ?assertEqual(0, length(couch_task_status:all())),
++
++    true.
++
++
++loop() ->
++    receive
++        {add, Props, From} ->
++            Resp = couch_task_status:add_task(Props),
++            From ! {ok, self(), Resp},
++            loop();
++        {update, Props, From} ->
++            Resp = couch_task_status:update(Props),
++            From ! {ok, self(), Resp},
++            loop();
++        {update_frequency, Msecs, From} ->
++            Resp = couch_task_status:set_update_frequency(Msecs),
++            From ! {ok, self(), Resp},
++            loop();
++        {done, From} ->
++            From ! {ok, self(), ok}
++    end.
++
++call(Pid, Command) ->
++    Pid ! {Command, self()},
++    wait(Pid).
++
++call(Pid, Command, Arg) ->
++    Pid ! {Command, Arg, self()},
++    wait(Pid).
++
++wait(Pid) ->
++    receive
++        {ok, Pid, Msg} ->
++            Msg
++    after ?TIMEOUT ->
++        throw(timeout_error)
++    end.
++
++get_task_prop(Pid, Prop) ->
++    From = list_to_binary(pid_to_list(Pid)),
++    Element = lists:foldl(
++        fun(PropList, Acc) ->
++            case couch_util:get_value(pid, PropList) of
++                From ->
++                    [PropList | Acc];
++                _ ->
++                    Acc
++            end
++        end,
++        [], couch_task_status:all()
++    ),
++    case couch_util:get_value(Prop, hd(Element), nil) of
++        nil ->
++            erlang:error({assertion_failed,
++                         [{module, ?MODULE},
++                          {line, ?LINE},
++                          {reason, "Could not get property '"
++                                   ++ couch_util:to_list(Prop)
++                                   ++ "' for task "
++                                   ++ pid_to_list(Pid)}]});
++        Value ->
++            Value
++    end.
+diff --git a/test/couchdb/couch_util_tests.erl b/test/couchdb/couch_util_tests.erl
+new file mode 100644
+index 0000000..8e24e72
+--- /dev/null
++++ b/test/couchdb/couch_util_tests.erl
+@@ -0,0 +1,136 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_util_tests).
++
++-include("couch_eunit.hrl").
++
++
++setup() ->
++    %% We cannot start the driver from here since it becomes bound to the
++    %% eunit master process, and the next couch_server_sup:start_link call
++    %% will fail because the server cannot load the driver while it already is.
++    %%
++    %% On the other hand, we cannot unload the driver here either, due to
++    %% {error, not_loaded_by_this_process} while it is loaded. Ideas welcome.
++    %%
++    couch_server_sup:start_link(?CONFIG_CHAIN),
++    %% couch_config:start_link(?CONFIG_CHAIN),
++    %% {ok, _} = couch_drv:start_link(),
++    ok.
++
++teardown(_) ->
++    couch_server_sup:stop(),
++    %% couch_config:stop(),
++    %% erl_ddll:unload_driver(couch_icu_driver),
++    ok.
++
++
++collation_test_() ->
++    {
++        "Collation tests",
++        [
++            {
++                setup,
++                fun setup/0, fun teardown/1,
++                [
++                    should_collate_ascii(),
++                    should_collate_non_ascii()
++                ]
++            }
++        ]
++    }.
++
++should_collate_ascii() ->
++    ?_assertEqual(1, couch_util:collate(<<"foo">>, <<"bar">>)).
++
++should_collate_non_ascii() ->
++    ?_assertEqual(-1, couch_util:collate(<<"A">>, <<"aa">>)).
++
++to_existed_atom_test() ->
++    ?assert(couch_util:to_existing_atom(true)),
++    ?assertMatch(foo, couch_util:to_existing_atom(<<"foo">>)),
++    ?assertMatch(foobarbaz, couch_util:to_existing_atom("foobarbaz")).
++
++implode_test() ->
++    ?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")).
++
++trim_test() ->
++    lists:map(fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
++              [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]).
++
++abs_pathname_test() ->
++    {ok, Cwd} = file:get_cwd(),
++    ?assertEqual(Cwd ++ "/foo", couch_util:abs_pathname("./foo")).
++
++flush_test() ->
++    ?assertNot(couch_util:should_flush()),
++    AcquireMem = fun() ->
++        _IntsToAGazillion = lists:seq(1, 200000),
++        _LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
++                                lists:seq(1, 500000)),
++        _BigBin = list_to_binary(_LotsOfData),
++
++        %% Allocating 200K tuples puts us above the memory threshold.
++        %% Originally, there should be:
++        %%      ?assertNot(should_flush())
++        %% however, unlike for the etap test, GC collects all allocated bits,
++        %% making this condition fail. So we have to invert the condition,
++        %% since GC works, cleans the memory, and everything is fine.
++        ?assertNot(couch_util:should_flush())
++    end,
++    AcquireMem(),
++
++    %% Checking to flush invokes GC
++    ?assertNot(couch_util:should_flush()).
++
++verify_test() ->
++    ?assert(couch_util:verify("It4Vooya", "It4Vooya")),
++    ?assertNot(couch_util:verify("It4VooyaX", "It4Vooya")),
++    ?assert(couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>)),
++    ?assertNot(couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>)),
++    ?assertNot(couch_util:verify(nil, <<"ahBase3r">>)).
++
++find_in_binary_test_() ->
++    Cases = [
++        {<<"foo">>, <<"foobar">>, {exact, 0}},
++        {<<"foo">>, <<"foofoo">>, {exact, 0}},
++        {<<"foo">>, <<"barfoo">>, {exact, 3}},
++        {<<"foo">>, <<"barfo">>, {partial, 3}},
++        {<<"f">>, <<"fobarfff">>, {exact, 0}},
++        {<<"f">>, <<"obarfff">>, {exact, 4}},
++        {<<"f">>, <<"obarggf">>, {exact, 6}},
++        {<<"f">>, <<"f">>, {exact, 0}},
++        {<<"f">>, <<"g">>, not_found},
++        {<<"foo">>, <<"f">>, {partial, 0}},
++        {<<"foo">>, <<"g">>, not_found},
++        {<<"foo">>, <<"">>, not_found},
++        {<<"fofo">>, <<"foofo">>, {partial, 3}},
++        {<<"foo">>, <<"gfobarfo">>, {partial, 6}},
++        {<<"foo">>, <<"gfobarf">>, {partial, 6}},
++        {<<"foo">>, <<"gfobar">>, not_found},
++        {<<"fog">>, <<"gbarfogquiz">>, {exact, 4}},
++        {<<"ggg">>, <<"ggg">>, {exact, 0}},
++        {<<"ggg">>, <<"ggggg">>, {exact, 0}},
++        {<<"ggg">>, <<"bggg">>, {exact, 1}},
++        {<<"ggg">>, <<"bbgg">>, {partial, 2}},
++        {<<"ggg">>, <<"bbbg">>, {partial, 3}},
++        {<<"ggg">>, <<"bgbggbggg">>, {exact, 6}},
++        {<<"ggg">>, <<"bgbggb">>, not_found}
++    ],
++    lists:map(
++        fun({Needle, Haystack, Result}) ->
++            Msg = lists:flatten(io_lib:format("Looking for ~s in ~s",
++                                              [Needle, Haystack])),
++            {Msg, ?_assertMatch(Result,
++                                couch_util:find_in_binary(Needle, Haystack))}
++        end, Cases).
+diff --git a/test/couchdb/couch_uuids_tests.erl b/test/couchdb/couch_uuids_tests.erl
+new file mode 100644
+index 0000000..ea1d034
+--- /dev/null
++++ b/test/couchdb/couch_uuids_tests.erl
+@@ -0,0 +1,161 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_uuids_tests).
++
++-include("couch_eunit.hrl").
++
++-define(TIMEOUT_S, 20).
++
++
++setup() ->
++    {ok, Pid} = couch_config:start_link(?CONFIG_CHAIN),
++    erlang:monitor(process, Pid),
++    couch_uuids:start(),
++    Pid.
++
++setup(Opts) ->
++    Pid = setup(),
++    lists:foreach(
++        fun({Option, Value}) ->
++            couch_config:set("uuids", Option, Value, false)
++        end, Opts),
++    Pid.
++
++teardown(Pid) ->
++    couch_uuids:stop(),
++    couch_config:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} -> ok
++    after
++        1000 -> throw({timeout_error, config_stop})
++    end.
++
++teardown(_, Pid) ->
++    teardown(Pid).
++
++
++default_test_() ->
++    {
++        "Default UUID algorithm",
++        {
++            setup,
++            fun setup/0, fun teardown/1,
++            fun should_be_unique/1
++        }
++    }.
++
++sequential_test_() ->
++    Opts = [{"algorithm", "sequential"}],
++    Cases = [
++        fun should_be_unique/2,
++        fun should_increment_monotonically/2,
++        fun should_rollover/2
++    ],
++    {
++        "UUID algorithm: sequential",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{Opts, Fun} || Fun <- Cases]
++        }
++    }.
++
++utc_test_() ->
++    Opts = [{"algorithm", "utc_random"}],
++    Cases = [
++        fun should_be_unique/2,
++        fun should_increment_monotonically/2
++    ],
++    {
++        "UUID algorithm: utc_random",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{Opts, Fun} || Fun <- Cases]
++        }
++    }.
++
++utc_id_suffix_test_() ->
++    Opts = [{"algorithm", "utc_id"}, {"utc_id_suffix", "bozo"}],
++    Cases = [
++        fun should_be_unique/2,
++        fun should_increment_monotonically/2,
++        fun should_preserve_suffix/2
++    ],
++    {
++        "UUID algorithm: utc_id",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{Opts, Fun} || Fun <- Cases]
++        }
++    }.
++
++
++should_be_unique() ->
++    %% this one may really run for too long on slow hosts
++    {timeout, ?TIMEOUT_S, ?_assert(test_unique(10000, [couch_uuids:new()]))}.
++should_be_unique(_) ->
++    should_be_unique().
++should_be_unique(_, _) ->
++    should_be_unique().
++
++should_increment_monotonically(_, _) ->
++    ?_assert(couch_uuids:new() < couch_uuids:new()).
++
++should_rollover(_, _) ->
++    ?_test(begin
++        UUID = binary_to_list(couch_uuids:new()),
++        Prefix = element(1, lists:split(26, UUID)),
++        N = gen_until_pref_change(Prefix, 0),
++        ?assert(N >= 5000 andalso N =< 11000)
++    end).
++
++should_preserve_suffix(_, _) ->
++    ?_test(begin
++        UUID = binary_to_list(couch_uuids:new()),
++        Suffix = get_suffix(UUID),
++        ?assert(test_same_suffix(10000, Suffix))
++    end).
++
++
++test_unique(0, _) ->
++    true;
++test_unique(N, UUIDs) ->
++    UUID = couch_uuids:new(),
++    ?assertNot(lists:member(UUID, UUIDs)),
++    test_unique(N - 1, [UUID| UUIDs]).
++
++get_prefix(UUID) ->
++    element(1, lists:split(26, binary_to_list(UUID))).
++
++gen_until_pref_change(_, Count) when Count > 8251 ->
++    Count;
++gen_until_pref_change(Prefix, N) ->
++    case get_prefix(couch_uuids:new()) of
++        Prefix -> gen_until_pref_change(Prefix, N + 1);
++        _ -> N
++    end.
++
++get_suffix(UUID) when is_binary(UUID) ->
++    get_suffix(binary_to_list(UUID));
++get_suffix(UUID) ->
++    element(2, lists:split(14, UUID)).
++
++test_same_suffix(0, _) ->
++    true;
++test_same_suffix(N, Suffix) ->
++    case get_suffix(couch_uuids:new()) of
++        Suffix -> test_same_suffix(N - 1, Suffix);
++        _ -> false
++    end.
+diff --git a/test/couchdb/couch_work_queue_tests.erl b/test/couchdb/couch_work_queue_tests.erl
+new file mode 100644
+index 0000000..8a463b5
+--- /dev/null
++++ b/test/couchdb/couch_work_queue_tests.erl
+@@ -0,0 +1,393 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couch_work_queue_tests).
++
++-include("couch_eunit.hrl").
++
++-define(TIMEOUT, 100).
++
++
++setup(Opts) ->
++    {ok, Q} = couch_work_queue:new(Opts),
++    Producer = spawn_producer(Q),
++    Consumer = spawn_consumer(Q),
++    {Q, Producer, Consumer}.
++
++setup_max_items() ->
++    setup([{max_items, 3}]).
++
++setup_max_size() ->
++    setup([{max_size, 160}]).
++
++setup_max_items_and_size() ->
++    setup([{max_size, 160}, {max_items, 3}]).
++
++setup_multi_workers() ->
++    {Q, Producer, Consumer1} = setup([{max_size, 160},
++                                      {max_items, 3},
++                                      {multi_workers, true}]),
++    Consumer2 = spawn_consumer(Q),
++    Consumer3 = spawn_consumer(Q),
++    {Q, Producer, [Consumer1, Consumer2, Consumer3]}.
++
++teardown({Q, Producer, Consumers}) when is_list(Consumers) ->
++    % consume all to unblock and let producer/consumer stop without timeout
++    [consume(Consumer, all) || Consumer <- Consumers],
++
++    ok = close_queue(Q),
++    ok = stop(Producer, "producer"),
++    R = [stop(Consumer, "consumer") || Consumer <- Consumers],
++    R = [ok || _ <- Consumers],
++    ok;
++teardown({Q, Producer, Consumer}) ->
++    teardown({Q, Producer, [Consumer]}).
++
++
++single_consumer_test_() ->
++    {
++        "Single producer and consumer",
++        [
++            {
++                "Queue with 3 max items",
++                {
++                    foreach,
++                    fun setup_max_items/0, fun teardown/1,
++                    single_consumer_max_item_count() ++ common_cases()
++                }
++            },
++            {
++                "Queue with max size of 160 bytes",
++                {
++                    foreach,
++                    fun setup_max_size/0, fun teardown/1,
++                    single_consumer_max_size() ++ common_cases()
++                }
++            },
++            {
++                "Queue with max size of 160 bytes and 3 max items",
++                {
++                    foreach,
++                    fun setup_max_items_and_size/0, fun teardown/1,
++                    single_consumer_max_items_and_size() ++ common_cases()
++                }
++            }
++        ]
++    }.
++
++multiple_consumers_test_() ->
++    {
++        "Single producer and multiple consumers",
++        [
++            {
++                "Queue with max size of 160 bytes and 3 max items",
++                {
++                    foreach,
++                    fun setup_multi_workers/0, fun teardown/1,
++                    common_cases() ++ multiple_consumers()
++                }
++
++            }
++        ]
++    }.
++
++common_cases()->
++    [
++        fun should_block_consumer_on_dequeue_from_empty_queue/1,
++        fun should_consume_right_item/1,
++        fun should_timeout_on_close_non_empty_queue/1,
++        fun should_not_block_producer_for_non_empty_queue_after_close/1,
++        fun should_be_closed/1
++    ].
++
++single_consumer_max_item_count()->
++    [
++        fun should_have_no_items_for_new_queue/1,
++        fun should_block_producer_on_full_queue_count/1,
++        fun should_receive_first_queued_item/1,
++        fun should_consume_multiple_items/1,
++        fun should_consume_all/1
++    ].
++
++single_consumer_max_size()->
++    [
++        fun should_have_zero_size_for_new_queue/1,
++        fun should_block_producer_on_full_queue_size/1,
++        fun should_increase_queue_size_on_produce/1,
++        fun should_receive_first_queued_item/1,
++        fun should_consume_multiple_items/1,
++        fun should_consume_all/1
++    ].
++
++single_consumer_max_items_and_size() ->
++    single_consumer_max_item_count() ++ single_consumer_max_size().
++
++multiple_consumers() ->
++    [
++        fun should_have_zero_size_for_new_queue/1,
++        fun should_have_no_items_for_new_queue/1,
++        fun should_increase_queue_size_on_produce/1
++    ].
++
++
++should_have_no_items_for_new_queue({Q, _, _}) ->
++    ?_assertEqual(0, couch_work_queue:item_count(Q)).
++
++should_have_zero_size_for_new_queue({Q, _, _}) ->
++    ?_assertEqual(0, couch_work_queue:size(Q)).
++
++should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumers}) when is_list(Consumers) ->
++    [consume(C, 2) || C <- Consumers],
++    Pongs = [ping(C) || C <- Consumers],
++    ?_assertEqual([timeout, timeout, timeout], Pongs);
++should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumer}) ->
++    consume(Consumer, 1),
++    Pong = ping(Consumer),
++    ?_assertEqual(timeout, Pong).
++
++should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) ->
++    [consume(C, 3) || C <- Consumers],
++
++    Item1 = produce(Producer, 10),
++    ok = ping(Producer),
++    ?assertEqual(0, couch_work_queue:item_count(Q)),
++    ?assertEqual(0, couch_work_queue:size(Q)),
++
++    Item2 = produce(Producer, 10),
++    ok = ping(Producer),
++    ?assertEqual(0, couch_work_queue:item_count(Q)),
++    ?assertEqual(0, couch_work_queue:size(Q)),
++
++    Item3 = produce(Producer, 10),
++    ok = ping(Producer),
++    ?assertEqual(0, couch_work_queue:item_count(Q)),
++    ?assertEqual(0, couch_work_queue:size(Q)),
++
++    R = [{ping(C), Item}
++         || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])],
++
++    ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R);
++should_consume_right_item({_, Producer, Consumer}) ->
++    consume(Consumer, 1),
++    Item = produce(Producer, 10),
++    produce(Producer, 20),
++    ok = ping(Producer),
++    ok = ping(Consumer),
++    {ok, Items} = last_consumer_items(Consumer),
++    ?_assertEqual([Item], Items).
++
++should_increase_queue_size_on_produce({Q, Producer, _}) ->
++    produce(Producer, 50),
++    ok = ping(Producer),
++    Count1 = couch_work_queue:item_count(Q),
++    Size1 = couch_work_queue:size(Q),
++
++    produce(Producer, 10),
++    Count2 = couch_work_queue:item_count(Q),
++    Size2 = couch_work_queue:size(Q),
++
++    ?_assertEqual([{Count1, Size1}, {Count2, Size2}], [{1, 50}, {2, 60}]).
++
++should_block_producer_on_full_queue_count({Q, Producer, _}) ->
++    produce(Producer, 10),
++    ?assertEqual(1, couch_work_queue:item_count(Q)),
++    ok = ping(Producer),
++
++    produce(Producer, 15),
++    ?assertEqual(2, couch_work_queue:item_count(Q)),
++    ok = ping(Producer),
++
++    produce(Producer, 20),
++    ?assertEqual(3, couch_work_queue:item_count(Q)),
++    Pong = ping(Producer),
++
++    ?_assertEqual(timeout, Pong).
++
++should_block_producer_on_full_queue_size({Q, Producer, _}) ->
++    produce(Producer, 100),
++    ok = ping(Producer),
++    ?assertEqual(1, couch_work_queue:item_count(Q)),
++    ?assertEqual(100, couch_work_queue:size(Q)),
++
++    produce(Producer, 110),
++    Pong = ping(Producer),
++    ?assertEqual(2, couch_work_queue:item_count(Q)),
++    ?assertEqual(210, couch_work_queue:size(Q)),
++
++    ?_assertEqual(timeout, Pong).
++
++should_consume_multiple_items({_, Producer, Consumer}) ->
++    Item1 = produce(Producer, 10),
++    ok = ping(Producer),
++
++    Item2 = produce(Producer, 15),
++    ok = ping(Producer),
++
++    consume(Consumer, 2),
++
++    {ok, Items} = last_consumer_items(Consumer),
++    ?_assertEqual([Item1, Item2], Items).
++
++should_receive_first_queued_item({Q, Producer, Consumer}) ->
++    consume(Consumer, 100),
++    timeout = ping(Consumer),
++
++    Item = produce(Producer, 11),
++    ok = ping(Producer),
++
++    ok = ping(Consumer),
++    ?assertEqual(0, couch_work_queue:item_count(Q)),
++
++    {ok, Items} = last_consumer_items(Consumer),
++    ?_assertEqual([Item], Items).
++
++should_consume_all({_, Producer, Consumer}) ->
++    Item1 = produce(Producer, 10),
++    Item2 = produce(Producer, 15),
++    Item3 = produce(Producer, 20),
++
++    consume(Consumer, all),
++
++    {ok, Items} = last_consumer_items(Consumer),
++    ?_assertEqual([Item1, Item2, Item3], Items).
++
++should_timeout_on_close_non_empty_queue({Q, Producer, _}) ->
++    produce(Producer, 1),
++    Status = close_queue(Q),
++
++    ?_assertEqual(timeout, Status).
++
++should_not_block_producer_for_non_empty_queue_after_close({Q, Producer, _}) ->
++    produce(Producer, 1),
++    close_queue(Q),
++    Pong = ping(Producer),
++    Size = couch_work_queue:size(Q),
++    Count = couch_work_queue:item_count(Q),
++
++    ?_assertEqual({ok, 1, 1}, {Pong, Size, Count}).
++
++should_be_closed({Q, _, Consumers}) when is_list(Consumers) ->
++    ok = close_queue(Q),
++
++    [consume(C, 1) || C <- Consumers],
++
++    LastConsumerItems = [last_consumer_items(C) || C <- Consumers],
++    ItemsCount = couch_work_queue:item_count(Q),
++    Size = couch_work_queue:size(Q),
++
++    ?_assertEqual({[closed, closed, closed], closed, closed},
++                  {LastConsumerItems, ItemsCount, Size});
++should_be_closed({Q, _, Consumer}) ->
++    ok = close_queue(Q),
++
++    consume(Consumer, 1),
++
++    LastConsumerItems = last_consumer_items(Consumer),
++    ItemsCount = couch_work_queue:item_count(Q),
++    Size = couch_work_queue:size(Q),
++
++    ?_assertEqual({closed, closed, closed},
++                  {LastConsumerItems, ItemsCount, Size}).
++
++
++close_queue(Q) ->
++    ok = couch_work_queue:close(Q),
++    MonRef = erlang:monitor(process, Q),
++    receive
++        {'DOWN', MonRef, process, Q, _Reason} -> ok
++    after ?TIMEOUT ->
++        erlang:demonitor(MonRef),
++        timeout
++    end.
++
++spawn_consumer(Q) ->
++    Parent = self(),
++    spawn(fun() -> consumer_loop(Parent, Q, nil) end).
++
++consumer_loop(Parent, Q, PrevItem) ->
++    receive
++        {stop, Ref} ->
++            Parent ! {ok, Ref};
++        {ping, Ref} ->
++            Parent ! {pong, Ref},
++            consumer_loop(Parent, Q, PrevItem);
++        {last_item, Ref} ->
++            Parent ! {item, Ref, PrevItem},
++            consumer_loop(Parent, Q, PrevItem);
++        {consume, N} ->
++            Result = couch_work_queue:dequeue(Q, N),
++            consumer_loop(Parent, Q, Result)
++    end.
++
++spawn_producer(Q) ->
++    Parent = self(),
++    spawn(fun() -> producer_loop(Parent, Q) end).
++
++producer_loop(Parent, Q) ->
++    receive
++        {stop, Ref} ->
++            Parent ! {ok, Ref};
++        {ping, Ref} ->
++            Parent ! {pong, Ref},
++            producer_loop(Parent, Q);
++        {produce, Ref, Size} ->
++            Item = crypto:rand_bytes(Size),
++            Parent ! {item, Ref, Item},
++            ok = couch_work_queue:queue(Q, Item),
++            producer_loop(Parent, Q)
++    end.
++
++consume(Consumer, N) ->
++    Consumer ! {consume, N}.
++
++last_consumer_items(Consumer) ->
++    Ref = make_ref(),
++    Consumer ! {last_item, Ref},
++    receive
++        {item, Ref, Items} ->
++            Items
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++produce(Producer, Size) ->
++    Ref = make_ref(),
++    Producer ! {produce, Ref, Size},
++    receive
++        {item, Ref, Item} ->
++            Item
++    after ?TIMEOUT ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Timeout asking producer to produce an item"}]})
++    end.
++
++ping(Pid) ->
++    Ref = make_ref(),
++    Pid ! {ping, Ref},
++    receive
++        {pong, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++stop(Pid, Name) ->
++    Ref = make_ref(),
++    Pid ! {stop, Ref},
++    receive
++        {ok, Ref} -> ok
++    after ?TIMEOUT ->
++        ?debugMsg("Timeout stopping " ++ Name),
++        timeout
++    end.
+diff --git a/test/couchdb/couchdb_attachments_tests.erl b/test/couchdb/couchdb_attachments_tests.erl
+new file mode 100644
+index 0000000..cf59785
+--- /dev/null
++++ b/test/couchdb/couchdb_attachments_tests.erl
+@@ -0,0 +1,638 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_attachments_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(COMPRESSION_LEVEL, 8).
++-define(ATT_BIN_NAME, <<"logo.png">>).
++-define(ATT_TXT_NAME, <<"file.erl">>).
++-define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
++-define(FIXTURE_TXT, ?FILE).
++-define(TIMEOUT, 1000).
++-define(TIMEOUT_EUNIT, 10).
++-define(TIMEWAIT, 100).
++-define(i2l(I), integer_to_list(I)).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    % ensure default compression settings are in effect for attachments_compression_tests
++    couch_config:set("attachments", "compression_level",
++                     ?i2l(?COMPRESSION_LEVEL), false),
++    couch_config:set("attachments", "compressible_types", "text/*", false),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, []),
++    ok = couch_db:close(Db),
++    Addr = couch_config:get("httpd", "bind_address", any),
++    Port = mochiweb_socket_server:get(couch_httpd, port),
++    Host = Addr ++ ":" ++ ?i2l(Port),
++    {Host, ?b2l(DbName)}.
++
++setup({binary, standalone}) ->
++    {Host, DbName} = setup(),
++    setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
++setup({text, standalone}) ->
++    {Host, DbName} = setup(),
++    setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT);
++setup({binary, inline}) ->
++    {Host, DbName} = setup(),
++    setup_att(fun create_inline_png_att/2, Host, DbName, ?FIXTURE_PNG);
++setup({text, inline}) ->
++    {Host, DbName} = setup(),
++    setup_att(fun create_inline_text_att/2, Host, DbName, ?FIXTURE_TXT);
++setup(compressed) ->
++    {Host, DbName} = setup(),
++    setup_att(fun create_already_compressed_att/2, Host, DbName, ?FIXTURE_TXT).
++setup_att(Fun, Host, DbName, File) ->
++    HttpHost = "http://" ++ Host,
++    AttUrl = Fun(HttpHost, DbName),
++    {ok, Data} = file:read_file(File),
++    DocUrl = string:join([HttpHost, DbName, "doc"], "/"),
++    Helpers = {DbName, DocUrl, AttUrl},
++    {Data, Helpers}.
++
++teardown(_, {_, {DbName, _, _}}) ->
++    teardown(DbName).
++
++teardown({_, DbName}) ->
++    teardown(DbName);
++teardown(DbName) ->
++    ok = couch_server:delete(?l2b(DbName), []),
++    ok.
++
++
++attachments_test_() ->
++    {
++        "Attachments tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            [
++                attachments_md5_tests(),
++                attachments_compression_tests()
++            ]
++        }
++    }.
++
++attachments_md5_tests() ->
++    {
++        "Attachments MD5 tests",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_upload_attachment_without_md5/1,
++                fun should_upload_attachment_by_chunks_without_md5/1,
++                fun should_upload_attachment_with_valid_md5_header/1,
++                fun should_upload_attachment_by_chunks_with_valid_md5_header/1,
++                fun should_upload_attachment_by_chunks_with_valid_md5_trailer/1,
++                fun should_reject_attachment_with_invalid_md5/1,
++                fun should_reject_chunked_attachment_with_invalid_md5/1,
++                fun should_reject_chunked_attachment_with_invalid_md5_trailer/1
++            ]
++        }
++    }.
++
++attachments_compression_tests() ->
++    Funs = [
++         fun should_get_att_without_accept_gzip_encoding/2,
++         fun should_get_att_with_accept_gzip_encoding/2,
++         fun should_get_att_with_accept_deflate_encoding/2,
++         fun should_return_406_response_on_unsupported_encoding/2,
++         fun should_get_doc_with_att_data/2,
++         fun should_get_doc_with_att_data_stub/2
++    ],
++    {
++        "Attachments compression tests",
++        [
++            {
++                "Created via Attachments API",
++                created_attachments_compression_tests(standalone, Funs)
++            },
++            {
++                "Created inline via Document API",
++                created_attachments_compression_tests(inline, Funs)
++            },
++            {
++                "Created already compressed via Attachments API",
++                {
++                    foreachx,
++                    fun setup/1, fun teardown/2,
++                    [{compressed, Fun} || Fun <- Funs]
++                }
++            },
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_not_create_compressed_att_with_deflate_encoding/1,
++                    fun should_not_create_compressed_att_with_compress_encoding/1,
++                    fun should_create_compressible_att_with_ctype_params/1
++                ]
++            }
++        ]
++    }.
++
++created_attachments_compression_tests(Mod, Funs) ->
++    [
++        {
++            "Compressible attachments",
++            {
++                foreachx,
++                fun setup/1, fun teardown/2,
++                [{{text, Mod}, Fun} || Fun <- Funs]
++            }
++        },
++        {
++            "Uncompressible attachments",
++            {
++                foreachx,
++                fun setup/1, fun teardown/2,
++                [{{binary, Mod}, Fun} || Fun <- Funs]
++            }
++        }
++    ].
++
++
++
++should_upload_attachment_without_md5({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        Body = "We all live in a yellow submarine!",
++        Headers = [
++            {"Content-Length", "34"},
++            {"Content-Type", "text/plain"},
++            {"Host", Host}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(201, Code),
++        ?assertEqual(true, get_json(Json, [<<"ok">>]))
++    end).
++
++should_upload_attachment_by_chunks_without_md5({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        AttData = <<"We all live in a yellow submarine!">>,
++        <<Part1:21/binary, Part2:13/binary>> = AttData,
++        Body = chunked_body([Part1, Part2]),
++        Headers = [
++            {"Content-Type", "text/plain"},
++            {"Transfer-Encoding", "chunked"},
++            {"Host", Host}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(201, Code),
++        ?assertEqual(true, get_json(Json, [<<"ok">>]))
++    end).
++
++should_upload_attachment_with_valid_md5_header({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        Body = "We all live in a yellow submarine!",
++        Headers = [
++            {"Content-Length", "34"},
++            {"Content-Type", "text/plain"},
++            {"Content-MD5", ?b2l(base64:encode(couch_util:md5(Body)))},
++            {"Host", Host}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(201, Code),
++        ?assertEqual(true, get_json(Json, [<<"ok">>]))
++    end).
++
++should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        AttData = <<"We all live in a yellow submarine!">>,
++        <<Part1:21/binary, Part2:13/binary>> = AttData,
++        Body = chunked_body([Part1, Part2]),
++        Headers = [
++            {"Content-Type", "text/plain"},
++            {"Content-MD5", ?b2l(base64:encode(couch_util:md5(AttData)))},
++            {"Host", Host},
++            {"Transfer-Encoding", "chunked"}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(201, Code),
++        ?assertEqual(true, get_json(Json, [<<"ok">>]))
++    end).
++
++should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        AttData = <<"We all live in a yellow submarine!">>,
++        <<Part1:21/binary, Part2:13/binary>> = AttData,
++        Body = [chunked_body([Part1, Part2]),
++                "Content-MD5: ", base64:encode(couch_util:md5(AttData)),
++                "\r\n"],
++        Headers = [
++            {"Content-Type", "text/plain"},
++            {"Host", Host},
++            {"Trailer", "Content-MD5"},
++            {"Transfer-Encoding", "chunked"}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(201, Code),
++        ?assertEqual(true, get_json(Json, [<<"ok">>]))
++    end).
++
++should_reject_attachment_with_invalid_md5({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        Body = "We all live in a yellow submarine!",
++        Headers = [
++            {"Content-Length", "34"},
++            {"Content-Type", "text/plain"},
++            {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
++            {"Host", Host}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(400, Code),
++        ?assertEqual(<<"content_md5_mismatch">>,
++                     get_json(Json, [<<"error">>]))
++    end).
++
++
++should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        AttData = <<"We all live in a yellow submarine!">>,
++        <<Part1:21/binary, Part2:13/binary>> = AttData,
++        Body = chunked_body([Part1, Part2]),
++        Headers = [
++            {"Content-Type", "text/plain"},
++            {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
++            {"Host", Host},
++            {"Transfer-Encoding", "chunked"}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(400, Code),
++        ?assertEqual(<<"content_md5_mismatch">>,
++                     get_json(Json, [<<"error">>]))
++    end).
++
++should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
++    ?_test(begin
++        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
++        AttData = <<"We all live in a yellow submarine!">>,
++        <<Part1:21/binary, Part2:13/binary>> = AttData,
++        Body = [chunked_body([Part1, Part2]),
++                "Content-MD5: ", base64:encode(<<"foobar!">>),
++                "\r\n"],
++        Headers = [
++            {"Content-Type", "text/plain"},
++            {"Host", Host},
++            {"Trailer", "Content-MD5"},
++            {"Transfer-Encoding", "chunked"}
++        ],
++        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
++        ?assertEqual(400, Code),
++        ?assertEqual(<<"content_md5_mismatch">>, get_json(Json, [<<"error">>]))
++    end).
++
++should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) ->
++    ?_test(begin
++        {ok, Code, Headers, Body} = test_request:get(AttUrl),
++        ?assertEqual(200, Code),
++        ?assertNot(lists:member({"Content-Encoding", "gzip"}, Headers)),
++        ?assertEqual(Data, iolist_to_binary(Body))
++    end).
++
++should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) ->
++    ?_test(begin
++        {ok, Code, Headers, Body} = test_request:get(
++            AttUrl, [{"Accept-Encoding", "gzip"}]),
++        ?assertEqual(200, Code),
++        ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
++        ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
++    end);
++should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) ->
++    ?_test(begin
++        {ok, Code, Headers, Body} = test_request:get(
++            AttUrl, [{"Accept-Encoding", "gzip"}]),
++        ?assertEqual(200, Code),
++        ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
++        ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
++    end);
++should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) ->
++    ?_test(begin
++        {ok, Code, Headers, Body} = test_request:get(
++            AttUrl, [{"Accept-Encoding", "gzip"}]),
++        ?assertEqual(200, Code),
++        ?assertEqual(undefined,
++                     couch_util:get_value("Content-Encoding", Headers)),
++        ?assertEqual(Data, iolist_to_binary(Body))
++    end).
++
++should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) ->
++    ?_test(begin
++        {ok, Code, Headers, Body} = test_request:get(
++            AttUrl, [{"Accept-Encoding", "deflate"}]),
++        ?assertEqual(200, Code),
++        ?assertEqual(undefined,
++                     couch_util:get_value("Content-Encoding", Headers)),
++        ?assertEqual(Data, iolist_to_binary(Body))
++    end).
++
++should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) ->
++    ?_assertEqual(406,
++        begin
++            {ok, Code, _, _} = test_request:get(
++                AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]),
++            Code
++        end).
++
++should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) ->
++    ?_test(begin
++        Url = DocUrl ++ "?attachments=true",
++        {ok, Code, _, Body} = test_request:get(
++            Url, [{"Accept", "application/json"}]),
++        ?assertEqual(200, Code),
++        Json = ejson:decode(Body),
++        AttJson = couch_util:get_nested_json_value(
++            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
++        AttData = couch_util:get_nested_json_value(
++            AttJson, [<<"data">>]),
++        ?assertEqual(
++            <<"text/plain">>,
++            couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
++        ?assertEqual(Data, base64:decode(AttData))
++    end);
++should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) ->
++    ?_test(begin
++        Url = DocUrl ++ "?attachments=true",
++        {ok, Code, _, Body} = test_request:get(
++            Url, [{"Accept", "application/json"}]),
++        ?assertEqual(200, Code),
++        Json = ejson:decode(Body),
++        AttJson = couch_util:get_nested_json_value(
++            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
++        AttData = couch_util:get_nested_json_value(
++            AttJson, [<<"data">>]),
++        ?assertEqual(
++            <<"text/plain">>,
++            couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
++        ?assertEqual(Data, base64:decode(AttData))
++    end);
++should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) ->
++    ?_test(begin
++        Url = DocUrl ++ "?attachments=true",
++        {ok, Code, _, Body} = test_request:get(
++            Url, [{"Accept", "application/json"}]),
++        ?assertEqual(200, Code),
++        Json = ejson:decode(Body),
++        AttJson = couch_util:get_nested_json_value(
++            Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
++        AttData = couch_util:get_nested_json_value(
++            AttJson, [<<"data">>]),
++        ?assertEqual(
++            <<"image/png">>,
++            couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
++        ?assertEqual(Data, base64:decode(AttData))
++    end).
++
++should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) ->
++    ?_test(begin
++        Url = DocUrl ++ "?att_encoding_info=true",
++        {ok, Code, _, Body} = test_request:get(
++            Url, [{"Accept", "application/json"}]),
++        ?assertEqual(200, Code),
++        Json = ejson:decode(Body),
++        {AttJson} = couch_util:get_nested_json_value(
++            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
++        ?assertEqual(<<"gzip">>,
++                     couch_util:get_value(<<"encoding">>, AttJson)),
++        AttLength = couch_util:get_value(<<"length">>, AttJson),
++        EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
++        ?assertEqual(AttLength, EncLength),
++        ?assertEqual(iolist_size(zlib:gzip(Data)), AttLength)
++    end);
++should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) ->
++    ?_test(begin
++        Url = DocUrl ++ "?att_encoding_info=true",
++        {ok, Code, _, Body} = test_request:get(
++            Url, [{"Accept", "application/json"}]),
++        ?assertEqual(200, Code),
++        Json = ejson:decode(Body),
++        {AttJson} = couch_util:get_nested_json_value(
++            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
++        ?assertEqual(<<"gzip">>,
++                     couch_util:get_value(<<"encoding">>, AttJson)),
++        AttEncLength = iolist_size(gzip(Data)),
++        ?assertEqual(AttEncLength,
++                     couch_util:get_value(<<"encoded_length">>, AttJson)),
++        ?assertEqual(byte_size(Data),
++                     couch_util:get_value(<<"length">>, AttJson))
++    end);
++should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) ->
++    ?_test(begin
++        Url = DocUrl ++ "?att_encoding_info=true",
++        {ok, Code, _, Body} = test_request:get(
++            Url, [{"Accept", "application/json"}]),
++        ?assertEqual(200, Code),
++        Json = ejson:decode(Body),
++        {AttJson} = couch_util:get_nested_json_value(
++            Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
++        ?assertEqual(undefined,
++                     couch_util:get_value(<<"encoding">>, AttJson)),
++        ?assertEqual(undefined,
++                     couch_util:get_value(<<"encoded_length">>, AttJson)),
++        ?assertEqual(byte_size(Data),
++                     couch_util:get_value(<<"length">>, AttJson))
++    end).
++
++should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) ->
++    ?_assertEqual(415,
++        begin
++            HttpHost = "http://" ++ Host,
++            AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
++            {ok, Data} = file:read_file(?FIXTURE_TXT),
++            Body = zlib:compress(Data),
++            Headers = [
++                {"Content-Encoding", "deflate"},
++                {"Content-Type", "text/plain"}
++            ],
++            {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body),
++            Code
++        end).
++
++should_not_create_compressed_att_with_compress_encoding({Host, DbName}) ->
++    % Note: As of OTP R13B04, it seems there's no LZW compression
++    % (i.e. UNIX compress utility implementation) lib in OTP.
++    % However there's a simple working Erlang implementation at:
++    % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
++    ?_assertEqual(415,
++        begin
++            HttpHost = "http://" ++ Host,
++            AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
++            {ok, Data} = file:read_file(?FIXTURE_TXT),
++            Headers = [
++                {"Content-Encoding", "compress"},
++                {"Content-Type", "text/plain"}
++            ],
++            {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data),
++            Code
++        end).
++
++should_create_compressible_att_with_ctype_params({Host, DbName}) ->
++    {timeout, ?TIMEOUT_EUNIT, ?_test(begin
++        HttpHost = "http://" ++ Host,
++        DocUrl = string:join([HttpHost, DbName, ?docid()], "/"),
++        AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"),
++        {ok, Data} = file:read_file(?FIXTURE_TXT),
++        Headers = [{"Content-Type", "text/plain; charset=UTF-8"}],
++        {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data),
++        ?assertEqual(201, Code0),
++
++        {ok, Code1, _, Body} = test_request:get(
++            DocUrl ++ "?att_encoding_info=true"),
++        ?assertEqual(200, Code1),
++        Json = ejson:decode(Body),
++        {AttJson} = couch_util:get_nested_json_value(
++            Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
++        ?assertEqual(<<"gzip">>,
++                     couch_util:get_value(<<"encoding">>, AttJson)),
++        AttEncLength = iolist_size(gzip(Data)),
++        ?assertEqual(AttEncLength,
++                     couch_util:get_value(<<"encoded_length">>, AttJson)),
++        ?assertEqual(byte_size(Data),
++                     couch_util:get_value(<<"length">>, AttJson))
++    end)}.
++
++
++get_json(Json, Path) ->
++    couch_util:get_nested_json_value(Json, Path).
++
++to_hex(Val) ->
++    to_hex(Val, []).
++
++to_hex(0, Acc) ->
++    Acc;
++to_hex(Val, Acc) ->
++    to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
++
++hex_char(V) when V < 10 -> $0 + V;
++hex_char(V) -> $A + V - 10.
++
++chunked_body(Chunks) ->
++    chunked_body(Chunks, []).
++
++chunked_body([], Acc) ->
++    iolist_to_binary(lists:reverse(Acc, "0\r\n"));
++chunked_body([Chunk | Rest], Acc) ->
++    Size = to_hex(size(Chunk)),
++    chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
++
++get_socket() ->
++    Options = [binary, {packet, 0}, {active, false}],
++    Addr = couch_config:get("httpd", "bind_address", any),
++    Port = mochiweb_socket_server:get(couch_httpd, port),
++    {ok, Sock} = gen_tcp:connect(Addr, Port, Options),
++    Sock.
++
++request(Method, Url, Headers, Body) ->
++    RequestHead = [Method, " ", Url, " HTTP/1.1"],
++    RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"]
++                      || {Key, Value} <- Headers],
++    Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body, "\r\n"],
++    Sock = get_socket(),
++    gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))),
++    timer:sleep(?TIMEWAIT),  % crude wait so the full HTTP response is buffered before the single recv below
++    {ok, R} = gen_tcp:recv(Sock, 0),
++    gen_tcp:close(Sock),
++    [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]),
++    {ok, {http_response, _, Code, _}, _} =
++        erlang:decode_packet(http, Header, []),
++    Json = ejson:decode(Body1),
++    {ok, Code, Json}.
++
++create_standalone_text_att(Host, DbName) ->
++    {ok, Data} = file:read_file(?FIXTURE_TXT),
++    Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
++    {ok, Code, _Headers, _Body} = test_request:put(
++        Url, [{"Content-Type", "text/plain"}], Data),
++    ?assertEqual(201, Code),
++    Url.
++
++create_standalone_png_att(Host, DbName) ->
++    {ok, Data} = file:read_file(?FIXTURE_PNG),
++    Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"),
++    {ok, Code, _Headers, _Body} = test_request:put(
++        Url, [{"Content-Type", "image/png"}], Data),
++    ?assertEqual(201, Code),
++    Url.
++
++create_inline_text_att(Host, DbName) ->
++    {ok, Data} = file:read_file(?FIXTURE_TXT),
++    Url = string:join([Host, DbName, "doc"], "/"),
++    Doc = {[
++        {<<"_attachments">>, {[
++            {?ATT_TXT_NAME, {[
++                {<<"content_type">>, <<"text/plain">>},
++                {<<"data">>, base64:encode(Data)}
++            ]}
++        }]}}
++    ]},
++    {ok, Code, _Headers, _Body} = test_request:put(
++        Url, [{"Content-Type", "application/json"}], ejson:encode(Doc)),
++    ?assertEqual(201, Code),
++    string:join([Url, ?b2l(?ATT_TXT_NAME)], "/").
++
++create_inline_png_att(Host, DbName) ->
++    {ok, Data} = file:read_file(?FIXTURE_PNG),
++    Url = string:join([Host, DbName, "doc"], "/"),
++    Doc = {[
++        {<<"_attachments">>, {[
++            {?ATT_BIN_NAME, {[
++                {<<"content_type">>, <<"image/png">>},
++                {<<"data">>, base64:encode(Data)}
++            ]}
++        }]}}
++    ]},
++    {ok, Code, _Headers, _Body} = test_request:put(
++        Url, [{"Content-Type", "application/json"}], ejson:encode(Doc)),
++    ?assertEqual(201, Code),
++    string:join([Url, ?b2l(?ATT_BIN_NAME)], "/").
++
++create_already_compressed_att(Host, DbName) ->
++    {ok, Data} = file:read_file(?FIXTURE_TXT),
++    Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
++    {ok, Code, _Headers, _Body} = test_request:put(
++        Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
++        zlib:gzip(Data)),
++    ?assertEqual(201, Code),
++    Url.
++
++gzip(Data) ->
++    Z = zlib:open(),
++    ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default),
++    zlib:deflate(Z, Data),
++    Last = zlib:deflate(Z, [], finish),
++    ok = zlib:deflateEnd(Z),
++    ok = zlib:close(Z),
++    Last.
+diff --git a/test/couchdb/couchdb_compaction_daemon.erl b/test/couchdb/couchdb_compaction_daemon.erl
+new file mode 100644
+index 0000000..725a97b
+--- /dev/null
++++ b/test/couchdb/couchdb_compaction_daemon.erl
+@@ -0,0 +1,231 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_compaction_daemon).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(DELAY, 100).
++-define(TIMEOUT, 30000).
++-define(TIMEOUT_S, ?TIMEOUT div 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    couch_config:set("compaction_daemon", "check_interval", "3", false),
++    couch_config:set("compaction_daemon", "min_file_size", "100000", false),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    create_design_doc(Db),
++    ok = couch_db:close(Db),
++    DbName.
++
++teardown(DbName) ->
++    Configs = couch_config:get("compactions"),
++    lists:foreach(
++        fun({Key, _}) ->
++            ok = couch_config:delete("compactions", Key, false)
++        end,
++        Configs),
++    couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++
++compaction_daemon_test_() ->
++    {
++        "Compaction daemon tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_compact_by_default_rule/1,
++                    fun should_compact_by_dbname_rule/1
++                ]
++            }
++        }
++    }.
++
++
++should_compact_by_default_rule(DbName) ->
++    {timeout, ?TIMEOUT_S, ?_test(begin
++        {ok, Db} = couch_db:open_int(DbName, []),
++        populate(DbName, 70, 70, 200 * 1024),
++
++        {_, DbFileSize} = get_db_frag(DbName),
++        {_, ViewFileSize} = get_view_frag(DbName),
++
++        ok = couch_config:set("compactions", "_default",
++            "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
++            false),
++
++        ok = timer:sleep(4000), % sleep longer than check_interval (set to 3s in start/0) so the daemon runs
++        wait_compaction_finished(DbName),
++        ok = couch_config:delete("compactions", "_default", false),
++
++        {DbFrag2, DbFileSize2} = get_db_frag(DbName),
++        {ViewFrag2, ViewFileSize2} = get_view_frag(DbName),
++
++        ?assert(DbFrag2 < 70),
++        ?assert(ViewFrag2 < 70),
++
++        ?assert(DbFileSize > DbFileSize2),
++        ?assert(ViewFileSize > ViewFileSize2),
++
++        ?assert(couch_db:is_idle(Db)),
++        ok = couch_db:close(Db)
++    end)}.
++
++should_compact_by_dbname_rule(DbName) ->
++    {timeout, ?TIMEOUT_S, ?_test(begin
++        {ok, Db} = couch_db:open_int(DbName, []),
++        populate(DbName, 70, 70, 200 * 1024),
++
++        {_, DbFileSize} = get_db_frag(DbName),
++        {_, ViewFileSize} = get_view_frag(DbName),
++
++        ok = couch_config:set("compactions", ?b2l(DbName),
++            "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
++            false),
++
++        ok = timer:sleep(4000), % sleep longer than check_interval (set to 3s in start/0) so the daemon runs
++        wait_compaction_finished(DbName),
++        ok = couch_config:delete("compactions", ?b2l(DbName), false),
++
++        {DbFrag2, DbFileSize2} = get_db_frag(DbName),
++        {ViewFrag2, ViewFileSize2} = get_view_frag(DbName),
++
++        ?assert(DbFrag2 < 70),
++        ?assert(ViewFrag2 < 70),
++
++        ?assert(DbFileSize > DbFileSize2),
++        ?assert(ViewFileSize > ViewFileSize2),
++
++        ?assert(couch_db:is_idle(Db)),
++        ok = couch_db:close(Db)
++    end)}.
++
++
++create_design_doc(Db) ->
++    DDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"_design/foo">>},
++        {<<"language">>, <<"javascript">>},
++        {<<"views">>, {[
++            {<<"foo">>, {[
++                {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
++            ]}},
++            {<<"foo2">>, {[
++                {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
++            ]}},
++            {<<"foo3">>, {[
++                {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
++            ]}}
++        ]}}
++    ]}),
++    {ok, _} = couch_db:update_docs(Db, [DDoc]),
++    {ok, _} = couch_db:ensure_full_commit(Db),
++    ok.
++
++populate(DbName, DbFrag, ViewFrag, MinFileSize) ->
++    {CurDbFrag, DbFileSize} = get_db_frag(DbName),
++    {CurViewFrag, ViewFileSize} = get_view_frag(DbName),
++    populate(DbName, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
++             lists:min([DbFileSize, ViewFileSize])).
++
++populate(_Db, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag, FileSize)
++    when CurDbFrag >= DbFrag, CurViewFrag >= ViewFrag, FileSize >= MinFileSize ->
++    ok;
++populate(DbName, DbFrag, ViewFrag, MinFileSize, _, _, _) ->
++    update(DbName),
++    {CurDbFrag, DbFileSize} = get_db_frag(DbName),
++    {CurViewFrag, ViewFileSize} = get_view_frag(DbName),
++    populate(DbName, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
++             lists:min([DbFileSize, ViewFileSize])).
++
++update(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    lists:foreach(fun(_) ->
++        Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}),
++        {ok, _} = couch_db:update_docs(Db, [Doc]),
++        query_view(Db#db.name)
++    end, lists:seq(1, 200)),
++    couch_db:close(Db).
++
++db_url(DbName) ->
++    Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
++    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++    "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
++
++query_view(DbName) ->
++    {ok, Code, _Headers, _Body} = test_request:get(
++        db_url(DbName) ++ "/_design/foo/_view/foo"),
++    ?assertEqual(200, Code).
++
++get_db_frag(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, Info} = couch_db:get_db_info(Db),
++    couch_db:close(Db),
++    FileSize = couch_util:get_value(disk_size, Info),
++    DataSize = couch_util:get_value(data_size, Info),
++    {round((FileSize - DataSize) / FileSize * 100), FileSize}.
++
++get_view_frag(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, Info} = couch_mrview:get_info(Db, <<"_design/foo">>),
++    couch_db:close(Db),
++    FileSize = couch_util:get_value(disk_size, Info),
++    DataSize = couch_util:get_value(data_size, Info),
++    {round((FileSize - DataSize) / FileSize * 100), FileSize}.
++
++wait_compaction_finished(DbName) ->
++    Parent = self(),
++    Loop = spawn_link(fun() -> wait_loop(DbName, Parent) end),
++    receive
++        {done, Loop} ->
++            ok
++    after ?TIMEOUT ->
++        erlang:error(
++            {assertion_failed,
++             [{module, ?MODULE}, {line, ?LINE},
++              {reason, "Compaction timeout"}]})
++    end.
++
++wait_loop(DbName, Parent) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, DbInfo} = couch_db:get_db_info(Db),
++    {ok, ViewInfo} = couch_mrview:get_info(Db, <<"_design/foo">>),
++    couch_db:close(Db),
++    case (couch_util:get_value(compact_running, ViewInfo) =:= true) orelse
++        (couch_util:get_value(compact_running, DbInfo) =:= true) of
++        false ->
++            Parent ! {done, self()};
++        true ->
++            ok = timer:sleep(?DELAY),
++            wait_loop(DbName, Parent)
++    end.
+diff --git a/test/couchdb/couchdb_cors_tests.erl b/test/couchdb/couchdb_cors_tests.erl
+new file mode 100644
+index 0000000..4e88ae7
+--- /dev/null
++++ b/test/couchdb/couchdb_cors_tests.erl
+@@ -0,0 +1,344 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_cors_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(SUPPORTED_METHODS,
++        "GET, HEAD, POST, PUT, DELETE, TRACE, CONNECT, COPY, OPTIONS").
++-define(TIMEOUT, 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    ok = couch_config:set("httpd", "enable_cors", "true", false),
++    ok = couch_config:set("vhosts", "example.com", "/", false),
++    Pid.
++
++stop(Pid) ->
++    couch_server_sup:stop(),
++    erlang:monitor(process, Pid),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    couch_db:close(Db),
++
++    couch_config:set("cors", "credentials", "false", false),
++    couch_config:set("cors", "origins", "http://example.com", false),
++
++    Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
++    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++    Host = "http://" ++ Addr ++ ":" ++ Port,
++    {Host, ?b2l(DbName)}.
++
++setup({Mod, VHost}) ->
++    {Host, DbName} = setup(),
++    Url = case Mod of
++        server ->
++            Host;
++        db ->
++            Host ++ "/" ++ DbName
++    end,
++    DefaultHeaders = [{"Origin", "http://example.com"}]
++                     ++ maybe_append_vhost(VHost),
++    {Host, DbName, Url, DefaultHeaders}.
++
++teardown(DbName) when is_list(DbName) ->
++    ok = couch_server:delete(?l2b(DbName), [?ADMIN_USER]),
++    ok;
++teardown({_, DbName}) ->
++    teardown(DbName).
++
++teardown(_, {_, DbName, _, _}) ->
++    teardown(DbName).
++
++
++cors_test_() ->
++    Funs = [
++        fun should_not_allow_origin/2,
++        fun should_not_allow_origin_with_port_mismatch/2,
++        fun should_not_allow_origin_with_scheme_mismatch/2,
++        fun should_not_all_origin_due_case_mismatch/2,
++        fun should_make_simple_request/2,
++        fun should_make_preflight_request/2,
++        fun should_make_prefligh_request_with_port/2,
++        fun should_make_prefligh_request_with_scheme/2,
++        fun should_make_prefligh_request_with_wildcard_origin/2,
++        fun should_make_request_with_credentials/2,
++        fun should_make_origin_request_with_auth/2,
++        fun should_make_preflight_request_with_auth/2
++    ],
++    {
++        "CORS (COUCHDB-431)",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            [
++                cors_tests(Funs),
++                vhost_cors_tests(Funs),
++                headers_tests()
++            ]
++        }
++    }.
++
++headers_tests() ->
++    {
++        "Various headers tests",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [
++                fun should_not_return_cors_headers_for_invalid_origin/1,
++                fun should_not_return_cors_headers_for_invalid_origin_preflight/1,
++                fun should_make_request_against_attachment/1,
++                fun should_make_range_request_against_attachment/1,
++                fun should_make_request_with_if_none_match_header/1
++            ]
++        }
++    }.
++
++cors_tests(Funs) ->
++    {
++        "CORS tests",
++        [
++            make_test_case(server, false, Funs),
++            make_test_case(db, false, Funs)
++        ]
++    }.
++
++vhost_cors_tests(Funs) ->
++    {
++        "Virtual Host CORS",
++        [
++            make_test_case(server, true, Funs),
++            make_test_case(db, true, Funs)
++        ]
++    }.
++
++make_test_case(Mod, UseVhost, Funs) ->
++    {
++        case Mod of server -> "Server"; db -> "Database" end,
++        {foreachx, fun setup/1, fun teardown/2, [{{Mod, UseVhost}, Fun}
++                                                 || Fun <- Funs]}
++    }.
++
++
++should_not_allow_origin(_, {_, _, Url, Headers0}) ->
++    ?_assertEqual(undefined,
++        begin
++            couch_config:delete("cors", "origins", false),
++            Headers1 = proplists:delete("Origin", Headers0),
++            Headers = [{"Origin", "http://127.0.0.1"}]
++                      ++ Headers1,
++            {ok, _, Resp, _} = test_request:get(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) ->
++    ?_assertEqual(undefined,
++        begin
++            Headers = [{"Origin", "http://example.com:5984"},
++                       {"Access-Control-Request-Method", "GET"}]
++                      ++ maybe_append_vhost(VHost),
++            {ok, _, Resp, _} = test_request:options(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) ->
++    ?_assertEqual(undefined,
++        begin
++            Headers = [{"Origin", "http://example.com:5984"},
++                       {"Access-Control-Request-Method", "GET"}]
++                      ++ maybe_append_vhost(VHost),
++            {ok, _, Resp, _} = test_request:options(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_not_all_origin_due_case_mismatch({_, VHost}, {_, _, Url, _}) ->
++    ?_assertEqual(undefined,
++        begin
++            Headers = [{"Origin", "http://ExAmPlE.CoM"},
++                       {"Access-Control-Request-Method", "GET"}]
++                      ++ maybe_append_vhost(VHost),
++            {ok, _, Resp, _} = test_request:options(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_make_simple_request(_, {_, _, Url, DefaultHeaders}) ->
++    ?_test(begin
++        {ok, _, Resp, _} = test_request:get(Url, DefaultHeaders),
++        ?assertEqual(
++            undefined,
++            proplists:get_value("Access-Control-Allow-Credentials", Resp)),
++        ?assertEqual(
++            "http://example.com",
++            proplists:get_value("Access-Control-Allow-Origin", Resp)),
++        ?assertEqual(
++            "Cache-Control, Content-Type, Server",
++            proplists:get_value("Access-Control-Expose-Headers", Resp))
++    end).
++
++should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) ->
++    ?_assertEqual(?SUPPORTED_METHODS,
++        begin
++            Headers = DefaultHeaders
++                      ++ [{"Access-Control-Request-Method", "GET"}],
++            {ok, _, Resp, _} = test_request:options(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Methods", Resp)
++        end).
++
++should_make_prefligh_request_with_port({_, VHost}, {_, _, Url, _}) ->
++    ?_assertEqual("http://example.com:5984",
++        begin
++            couch_config:set("cors", "origins", "http://example.com:5984",
++                             false),
++            Headers = [{"Origin", "http://example.com:5984"},
++                       {"Access-Control-Request-Method", "GET"}]
++                      ++ maybe_append_vhost(VHost),
++            {ok, _, Resp, _} = test_request:options(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_make_prefligh_request_with_scheme({_, VHost}, {_, _, Url, _}) ->
++    ?_assertEqual("https://example.com:5984",
++        begin
++            couch_config:set("cors", "origins", "https://example.com:5984",
++                             false),
++            Headers = [{"Origin", "https://example.com:5984"},
++                       {"Access-Control-Request-Method", "GET"}]
++                      ++ maybe_append_vhost(VHost),
++            {ok, _, Resp, _} = test_request:options(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_make_prefligh_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) ->
++    ?_assertEqual("https://example.com:5984",
++        begin
++            couch_config:set("cors", "origins", "*", false),
++            Headers = [{"Origin", "https://example.com:5984"},
++                       {"Access-Control-Request-Method", "GET"}]
++                      ++ maybe_append_vhost(VHost),
++            {ok, _, Resp, _} = test_request:options(Url, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) ->
++    ?_assertEqual("true",
++        begin
++            ok = couch_config:set("cors", "credentials", "true", false),
++            {ok, _, Resp, _} = test_request:options(Url, DefaultHeaders),
++            proplists:get_value("Access-Control-Allow-Credentials", Resp)
++        end).
++
++should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
++    ?_assertEqual("http://example.com",
++        begin
++            Hashed = couch_passwords:hash_admin_password(<<"test">>),
++            couch_config:set("admins", "test", Hashed, false),
++            {ok, _, Resp, _} = test_request:get(
++                Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]),
++            couch_config:delete("admins", "test", false),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
++    ?_assertEqual(?SUPPORTED_METHODS,
++        begin
++            Hashed = couch_passwords:hash_admin_password(<<"test">>),
++            couch_config:set("admins", "test", Hashed, false),
++            Headers = DefaultHeaders
++                      ++ [{"Access-Control-Request-Method", "GET"}],
++            {ok, _, Resp, _} = test_request:options(
++                Url, Headers, [{basic_auth, {"test", "test"}}]),
++            couch_config:delete("admins", "test", false),
++            proplists:get_value("Access-Control-Allow-Methods", Resp)
++        end).
++
++should_not_return_cors_headers_for_invalid_origin({Host, _}) ->
++    ?_assertEqual(undefined,
++        begin
++            Headers = [{"Origin", "http://127.0.0.1"}],
++            {ok, _, Resp, _} = test_request:get(Host, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) ->
++    ?_assertEqual(undefined,
++        begin
++            Headers = [{"Origin", "http://127.0.0.1"},
++                       {"Access-Control-Request-Method", "GET"}],
++            {ok, _, Resp, _} = test_request:options(Host, Headers),
++            proplists:get_value("Access-Control-Allow-Origin", Resp)
++        end).
++
++should_make_request_against_attachment({Host, DbName}) ->
++    {"COUCHDB-1689",
++     ?_assertEqual(200,
++         begin
++             Url = Host ++ "/" ++ DbName,
++             {ok, Code0, _, _} = test_request:put(
++                 Url ++ "/doc/file.txt", [{"Content-Type", "text/plain"}],
++                 "hello, couch!"),
++             ?assert(Code0 =:= 201),
++             {ok, Code, _, _} = test_request:get(
++                 Url ++ "/doc?attachments=true",
++                 [{"Origin", "http://example.com"}]),
++             Code
++         end)}.
++
++should_make_range_request_against_attachment({Host, DbName}) ->
++    {"COUCHDB-1689",
++     ?_assertEqual(206,
++         begin
++             Url = Host ++ "/" ++ DbName,
++             {ok, Code0, _, _} = test_request:put(
++                 Url ++ "/doc/file.txt",
++                 [{"Content-Type", "application/octet-stream"}],
++                 "hello, couch!"),
++             ?assert(Code0 =:= 201),
++             {ok, Code, _, _} = test_request:get(
++                 Url ++ "/doc/file.txt", [{"Origin", "http://example.com"},
++                                          {"Range", "bytes=0-6"}]),
++             Code
++         end)}.
++
++should_make_request_with_if_none_match_header({Host, DbName}) ->
++    {"COUCHDB-1697",
++     ?_assertEqual(304,
++         begin
++             Url = Host ++ "/" ++ DbName,
++             {ok, Code0, Headers0, _} = test_request:put(
++                 Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"),
++             ?assert(Code0 =:= 201),
++             ETag = proplists:get_value("ETag", Headers0),
++             {ok, Code, _, _} = test_request:get(
++                 Url ++ "/doc", [{"Origin", "http://example.com"},
++                                 {"If-None-Match", ETag}]),
++             Code
++        end)}.
++
++
++maybe_append_vhost(true) ->
++    [{"Host", "http://example.com"}];
++maybe_append_vhost(false) ->
++    [].
+diff --git a/test/couchdb/couchdb_file_compression_tests.erl b/test/couchdb/couchdb_file_compression_tests.erl
+new file mode 100644
+index 0000000..fd3f513
+--- /dev/null
++++ b/test/couchdb/couchdb_file_compression_tests.erl
+@@ -0,0 +1,239 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_file_compression_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(DDOC_ID, <<"_design/test">>).
++-define(DOCS_COUNT, 5000).
++-define(TIMEOUT, 30000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    couch_config:set("couchdb", "file_compression", "none", false),
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = populate_db(Db, ?DOCS_COUNT),
++    DDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, ?DDOC_ID},
++        {<<"language">>, <<"javascript">>},
++        {<<"views">>, {[
++                {<<"by_id">>, {[
++                    {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>}
++                ]}}
++            ]}
++        }
++    ]}),
++    {ok, _} = couch_db:update_doc(Db, DDoc, []),
++    refresh_index(DbName),
++    ok = couch_db:close(Db),
++    DbName.
++
++teardown(DbName) ->
++    ok = couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++
++couch_auth_cache_test_() ->
++    {
++        "CouchDB file compression tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_use_none/1,
++                    fun should_use_deflate_1/1,
++                    fun should_use_deflate_9/1,
++                    fun should_use_snappy/1,
++                    fun should_compare_compression_methods/1
++                ]
++            }
++        }
++    }.
++
++
++should_use_none(DbName) ->
++    couch_config:set("couchdb", "file_compression", "none", false),
++    {
++        "Use no compression",
++        [
++            {"compact database", ?_test(compact_db(DbName))},
++            {"compact view", ?_test(compact_view(DbName))}
++        ]
++    }.
++
++should_use_deflate_1(DbName) ->
++    couch_config:set("couchdb", "file_compression", "deflate_1", false),
++    {
++        "Use deflate compression at level 1",
++        [
++            {"compact database", ?_test(compact_db(DbName))},
++            {"compact view", ?_test(compact_view(DbName))}
++        ]
++    }.
++
++should_use_deflate_9(DbName) ->
++    couch_config:set("couchdb", "file_compression", "deflate_9", false),
++    {
++        "Use deflate compression at level 9",
++        [
++            {"compact database", ?_test(compact_db(DbName))},
++            {"compact view", ?_test(compact_view(DbName))}
++        ]
++    }.
++
++should_use_snappy(DbName) ->
++    couch_config:set("couchdb", "file_compression", "snappy", false),
++    {
++        "Use snappy compression",
++        [
++            {"compact database", ?_test(compact_db(DbName))},
++            {"compact view", ?_test(compact_view(DbName))}
++        ]
++    }.
++
++should_compare_compression_methods(DbName) ->
++    {"none > snappy > deflate_1 > deflate_9",
++     {timeout, ?TIMEOUT div 1000, ?_test(compare_compression_methods(DbName))}}.
++
++compare_compression_methods(DbName) ->
++    couch_config:set("couchdb", "file_compression", "none", false),
++    compact_db(DbName),
++    compact_view(DbName),
++    DbSizeNone = db_disk_size(DbName),
++    ViewSizeNone = view_disk_size(DbName),
++
++    couch_config:set("couchdb", "file_compression", "snappy", false),
++    compact_db(DbName),
++    compact_view(DbName),
++    DbSizeSnappy = db_disk_size(DbName),
++    ViewSizeSnappy = view_disk_size(DbName),
++
++    ?assert(DbSizeNone > DbSizeSnappy),
++    ?assert(ViewSizeNone > ViewSizeSnappy),
++
++    couch_config:set("couchdb", "file_compression", "deflate_1", false),
++    compact_db(DbName),
++    compact_view(DbName),
++    DbSizeDeflate1 = db_disk_size(DbName),
++    ViewSizeDeflate1 = view_disk_size(DbName),
++
++    ?assert(DbSizeSnappy > DbSizeDeflate1),
++    ?assert(ViewSizeSnappy > ViewSizeDeflate1),
++
++    couch_config:set("couchdb", "file_compression", "deflate_9", false),
++    compact_db(DbName),
++    compact_view(DbName),
++    DbSizeDeflate9 = db_disk_size(DbName),
++    ViewSizeDeflate9 = view_disk_size(DbName),
++
++    ?assert(DbSizeDeflate1 > DbSizeDeflate9),
++    ?assert(ViewSizeDeflate1 > ViewSizeDeflate9).
++
++
++populate_db(_Db, NumDocs) when NumDocs =< 0 ->
++    ok;
++populate_db(Db, NumDocs) ->
++    Docs = lists:map(
++        fun(_) ->
++            couch_doc:from_json_obj({[
++                {<<"_id">>, couch_uuids:random()},
++                {<<"string">>, ?l2b(lists:duplicate(1000, $X))}
++            ]})
++        end,
++        lists:seq(1, 500)),
++    {ok, _} = couch_db:update_docs(Db, Docs, []),
++    populate_db(Db, NumDocs - 500).
++
++refresh_index(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
++    couch_mrview:query_view(Db, DDoc, <<"by_id">>, [{stale, false}]),
++    ok = couch_db:close(Db).
++
++compact_db(DbName) ->
++    DiskSizeBefore = db_disk_size(DbName),
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, CompactPid} = couch_db:start_compact(Db),
++    MonRef = erlang:monitor(process, CompactPid),
++    receive
++        {'DOWN', MonRef, process, CompactPid, normal} ->
++            ok;
++        {'DOWN', MonRef, process, CompactPid, Reason} ->
++            erlang:error({assertion_failed,
++                          [{module, ?MODULE},
++                           {line, ?LINE},
++                           {reason, "Error compacting database: "
++                                    ++ couch_util:to_list(Reason)}]})
++    after ?TIMEOUT ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Timeout waiting for database compaction"}]})
++    end,
++    ok = couch_db:close(Db),
++    DiskSizeAfter = db_disk_size(DbName),
++    ?assert(DiskSizeBefore > DiskSizeAfter).
++
++compact_view(DbName) ->
++    DiskSizeBefore = view_disk_size(DbName),
++    {ok, MonRef} = couch_mrview:compact(DbName, ?DDOC_ID, [monitor]),
++    receive
++        {'DOWN', MonRef, process, _CompactPid, normal} ->
++            ok;
++        {'DOWN', MonRef, process, _CompactPid, Reason} ->
++            erlang:error({assertion_failed,
++                          [{module, ?MODULE},
++                           {line, ?LINE},
++                           {reason, "Error compacting view group: "
++                                    ++ couch_util:to_list(Reason)}]})
++    after ?TIMEOUT ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Timeout waiting for view group compaction"}]})
++    end,
++    DiskSizeAfter = view_disk_size(DbName),
++    ?assert(DiskSizeBefore > DiskSizeAfter).
++
++db_disk_size(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, Info} = couch_db:get_db_info(Db),
++    ok = couch_db:close(Db),
++    couch_util:get_value(disk_size, Info).
++
++view_disk_size(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
++    {ok, Info} = couch_mrview:get_info(Db, DDoc),
++    ok = couch_db:close(Db),
++    couch_util:get_value(disk_size, Info).
+diff --git a/test/couchdb/couchdb_http_proxy_tests.erl b/test/couchdb/couchdb_http_proxy_tests.erl
+new file mode 100644
+index 0000000..acb1974
+--- /dev/null
++++ b/test/couchdb/couchdb_http_proxy_tests.erl
+@@ -0,0 +1,462 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_http_proxy_tests).
++
++-include("couch_eunit.hrl").
++
++-record(req, {method=get, path="", headers=[], body="", opts=[]}).
++
++-define(CONFIG_FIXTURE_TEMP,
++    begin
++        FileName = filename:join([?TEMPDIR, ?tempfile() ++ ".ini"]),
++        {ok, Fd} = file:open(FileName, write),
++        ok = file:truncate(Fd),
++        ok = file:close(Fd),
++        FileName
++    end).
++-define(TIMEOUT, 5000).
++
++
++start() ->
++    % we have to write any config changes to a temp ini file so we don't lose
++    % them when the supervisor kills all children due to reaching the restart
++    % threshold (each httpd_global_handlers change causes a couch_httpd restart)
++    couch_server_sup:start_link(?CONFIG_CHAIN ++ [?CONFIG_FIXTURE_TEMP]),
++    % 49151 is IANA Reserved, let's assume no one is listening there
++    couch_config:set("httpd_global_handlers", "_error",
++        "{couch_httpd_proxy, handle_proxy_req, <<\"http://127.0.0.1:49151/\">>}"
++    ),
++    ok.
++
++stop(_) ->
++    couch_server_sup:stop(),
++    ok.
++
++setup() ->
++    {ok, Pid} = test_web:start_link(),
++    Value = lists:flatten(io_lib:format(
++        "{couch_httpd_proxy, handle_proxy_req, ~p}",
++        [list_to_binary(proxy_url())])),
++    couch_config:set("httpd_global_handlers", "_test", Value),
++    % let couch_httpd restart
++    timer:sleep(100),
++    Pid.
++
++teardown(Pid) ->
++    erlang:monitor(process, Pid),
++    test_web:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, test_web_stop})
++    end.
++
++
++http_proxy_test_() ->
++    {
++        "HTTP Proxy handler tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_proxy_basic_request/1,
++                    fun should_return_alternative_status/1,
++                    fun should_respect_trailing_slash/1,
++                    fun should_proxy_headers/1,
++                    fun should_proxy_host_header/1,
++                    fun should_pass_headers_back/1,
++                    fun should_use_same_protocol_version/1,
++                    fun should_proxy_body/1,
++                    fun should_proxy_body_back/1,
++                    fun should_proxy_chunked_body/1,
++                    fun should_proxy_chunked_body_back/1,
++                    fun should_rewrite_location_header/1,
++                    fun should_not_rewrite_external_locations/1,
++                    fun should_rewrite_relative_location/1,
++                    fun should_refuse_connection_to_backend/1
++                ]
++            }
++
++        }
++    }.
++
++
++should_proxy_basic_request(_) ->
++    Remote = fun(Req) ->
++        'GET' = Req:get(method),
++        "/" = Req:get(path),
++        0 = Req:get(body_length),
++        <<>> = Req:recv_body(),
++        {ok, {200, [{"Content-Type", "text/plain"}], "ok"}}
++    end,
++    Local = fun
++        ({ok, "200", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    ?_test(check_request(#req{}, Remote, Local)).
++
++should_return_alternative_status(_) ->
++    Remote = fun(Req) ->
++        "/alternate_status" = Req:get(path),
++        {ok, {201, [], "ok"}}
++    end,
++    Local = fun
++        ({ok, "201", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{path = "/alternate_status"},
++    ?_test(check_request(Req, Remote, Local)).
++
++should_respect_trailing_slash(_) ->
++    Remote = fun(Req) ->
++        "/trailing_slash/" = Req:get(path),
++        {ok, {200, [], "ok"}}
++    end,
++    Local = fun
++        ({ok, "200", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{path="/trailing_slash/"},
++    ?_test(check_request(Req, Remote, Local)).
++
++should_proxy_headers(_) ->
++    Remote = fun(Req) ->
++        "/passes_header" = Req:get(path),
++        "plankton" = Req:get_header_value("X-CouchDB-Ralph"),
++        {ok, {200, [], "ok"}}
++    end,
++    Local = fun
++        ({ok, "200", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{
++        path="/passes_header",
++        headers=[{"X-CouchDB-Ralph", "plankton"}]
++    },
++    ?_test(check_request(Req, Remote, Local)).
++
++should_proxy_host_header(_) ->
++    Remote = fun(Req) ->
++        "/passes_host_header" = Req:get(path),
++        "www.google.com" = Req:get_header_value("Host"),
++        {ok, {200, [], "ok"}}
++    end,
++    Local = fun
++        ({ok, "200", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{
++        path="/passes_host_header",
++        headers=[{"Host", "www.google.com"}]
++    },
++    ?_test(check_request(Req, Remote, Local)).
++
++should_pass_headers_back(_) ->
++    Remote = fun(Req) ->
++        "/passes_header_back" = Req:get(path),
++        {ok, {200, [{"X-CouchDB-Plankton", "ralph"}], "ok"}}
++    end,
++    Local = fun
++        ({ok, "200", Headers, "ok"}) ->
++            lists:member({"X-CouchDB-Plankton", "ralph"}, Headers);
++        (_) ->
++            false
++    end,
++    Req = #req{path="/passes_header_back"},
++    ?_test(check_request(Req, Remote, Local)).
++
++should_use_same_protocol_version(_) ->
++    Remote = fun(Req) ->
++        "/uses_same_version" = Req:get(path),
++        {1, 0} = Req:get(version),
++        {ok, {200, [], "ok"}}
++    end,
++    Local = fun
++        ({ok, "200", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{
++        path="/uses_same_version",
++        opts=[{http_vsn, {1, 0}}]
++    },
++    ?_test(check_request(Req, Remote, Local)).
++
++should_proxy_body(_) ->
++    Remote = fun(Req) ->
++        'PUT' = Req:get(method),
++        "/passes_body" = Req:get(path),
++        <<"Hooray!">> = Req:recv_body(),
++        {ok, {201, [], "ok"}}
++    end,
++    Local = fun
++        ({ok, "201", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{
++        method=put,
++        path="/passes_body",
++        body="Hooray!"
++    },
++    ?_test(check_request(Req, Remote, Local)).
++
++should_proxy_body_back(_) ->
++    BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
++    Remote = fun(Req) ->
++        'GET' = Req:get(method),
++        "/passes_eof_body" = Req:get(path),
++        {raw, {200, [{"Connection", "close"}], BodyChunks}}
++    end,
++    Local = fun
++        ({ok, "200", _, "foobarbazinga"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{path="/passes_eof_body"},
++    ?_test(check_request(Req, Remote, Local)).
++
++should_proxy_chunked_body(_) ->
++    BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
++    Remote = fun(Req) ->
++        'POST' = Req:get(method),
++        "/passes_chunked_body" = Req:get(path),
++        RecvBody = fun
++            ({Length, Chunk}, [Chunk | Rest]) ->
++                Length = size(Chunk),
++                Rest;
++            ({0, []}, []) ->
++                ok
++        end,
++        ok = Req:stream_body(1024 * 1024, RecvBody, BodyChunks),
++        {ok, {201, [], "ok"}}
++    end,
++    Local = fun
++        ({ok, "201", _, "ok"}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{
++        method=post,
++        path="/passes_chunked_body",
++        headers=[{"Transfer-Encoding", "chunked"}],
++        body=chunked_body(BodyChunks)
++    },
++    ?_test(check_request(Req, Remote, Local)).
++
++should_proxy_chunked_body_back(_) ->
++    ?_test(begin
++        Remote = fun(Req) ->
++            'GET' = Req:get(method),
++            "/passes_chunked_body_back" = Req:get(path),
++            BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
++            {chunked, {200, [{"Transfer-Encoding", "chunked"}], BodyChunks}}
++        end,
++        Req = #req{
++            path="/passes_chunked_body_back",
++            opts=[{stream_to, self()}]
++        },
++
++        Resp = check_request(Req, Remote, no_local),
++        ?assertMatch({ibrowse_req_id, _}, Resp),
++        {_, ReqId} = Resp,
++
++        % Grab headers from response
++        receive
++            {ibrowse_async_headers, ReqId, "200", Headers} ->
++                ?assertEqual("chunked",
++                             proplists:get_value("Transfer-Encoding", Headers)),
++                ibrowse:stream_next(ReqId)
++        after 1000 ->
++            throw({error, timeout})
++        end,
++
++        ?assertEqual(<<"foobarbazinga">>, recv_body(ReqId, [])),
++        ?assertEqual(was_ok, test_web:check_last())
++    end).
++
++should_refuse_connection_to_backend(_) ->
++    Local = fun
++        ({ok, "500", _, _}) ->
++            true;
++        (_) ->
++            false
++    end,
++    Req = #req{opts=[{url, server_url("/_error")}]},
++    ?_test(check_request(Req, no_remote, Local)).
++
++should_rewrite_location_header(_) ->
++    {
++        "Testing location header rewrites",
++        do_rewrite_tests([
++            {"Location", proxy_url() ++ "/foo/bar",
++                         server_url() ++ "/foo/bar"},
++            {"Content-Location", proxy_url() ++ "/bing?q=2",
++                                 server_url() ++ "/bing?q=2"},
++            {"Uri", proxy_url() ++ "/zip#frag",
++                    server_url() ++ "/zip#frag"},
++            {"Destination", proxy_url(),
++                            server_url() ++ "/"}
++        ])
++    }.
++
++should_not_rewrite_external_locations(_) ->
++    {
++        "Testing no rewrite of external locations",
++        do_rewrite_tests([
++            {"Location", external_url() ++ "/search",
++                         external_url() ++ "/search"},
++            {"Content-Location", external_url() ++ "/s?q=2",
++                                 external_url() ++ "/s?q=2"},
++            {"Uri", external_url() ++ "/f#f",
++                    external_url() ++ "/f#f"},
++            {"Destination", external_url() ++ "/f?q=2#f",
++                            external_url() ++ "/f?q=2#f"}
++        ])
++    }.
++
++should_rewrite_relative_location(_) ->
++    {
++        "Testing relative rewrites",
++        do_rewrite_tests([
++            {"Location", "/foo",
++                         server_url() ++ "/foo"},
++            {"Content-Location", "bar",
++                                 server_url() ++ "/bar"},
++            {"Uri", "/zing?q=3",
++                    server_url() ++ "/zing?q=3"},
++            {"Destination", "bing?q=stuff#yay",
++                            server_url() ++ "/bing?q=stuff#yay"}
++        ])
++    }.
++
++
++do_rewrite_tests(Tests) ->
++    lists:map(fun({Header, Location, Url}) ->
++        should_rewrite_header(Header, Location, Url)
++    end, Tests).
++
++should_rewrite_header(Header, Location, Url) ->
++    Remote = fun(Req) ->
++        "/rewrite_test" = Req:get(path),
++        {ok, {302, [{Header, Location}], "ok"}}
++    end,
++    Local = fun
++        ({ok, "302", Headers, "ok"}) ->
++            ?assertEqual(Url, couch_util:get_value(Header, Headers)),
++            true;
++        (E) ->
++            ?debugFmt("~p", [E]),
++            false
++    end,
++    Req = #req{path="/rewrite_test"},
++    {Header, ?_test(check_request(Req, Remote, Local))}.
++
++
++server_url() ->
++    server_url("/_test").
++
++server_url(Resource) ->
++    Addr = couch_config:get("httpd", "bind_address"),
++    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++    lists:concat(["http://", Addr, ":", Port, Resource]).
++
++proxy_url() ->
++    "http://127.0.0.1:" ++ integer_to_list(test_web:get_port()).
++
++external_url() ->
++    "https://google.com".
++
++check_request(Req, Remote, Local) ->
++    case Remote of
++        no_remote ->
++            ok;
++        _ ->
++            test_web:set_assert(Remote)
++    end,
++    Url = case proplists:lookup(url, Req#req.opts) of
++        none ->
++            server_url() ++ Req#req.path;
++        {url, DestUrl} ->
++            DestUrl
++    end,
++    Opts = [{headers_as_is, true} | Req#req.opts],
++    Resp = ibrowse:send_req(
++        Url, Req#req.headers, Req#req.method, Req#req.body, Opts
++    ),
++    %?debugFmt("ibrowse response: ~p", [Resp]),
++    case Local of
++        no_local ->
++            ok;
++        _ ->
++            ?assert(Local(Resp))
++    end,
++    case {Remote, Local} of
++        {no_remote, _} ->
++            ok;
++        {_, no_local} ->
++            ok;
++        _ ->
++            ?assertEqual(was_ok, test_web:check_last())
++    end,
++    Resp.
++
++chunked_body(Chunks) ->
++    chunked_body(Chunks, []).
++
++chunked_body([], Acc) ->
++    iolist_to_binary(lists:reverse(Acc, "0\r\n\r\n"));
++chunked_body([Chunk | Rest], Acc) ->
++    Size = to_hex(size(Chunk)),
++    chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
++
++to_hex(Val) ->
++    to_hex(Val, []).
++
++to_hex(0, Acc) ->
++    Acc;
++to_hex(Val, Acc) ->
++    to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
++
++hex_char(V) when V < 10 -> $0 + V;
++hex_char(V) -> $A + V - 10.
++
++recv_body(ReqId, Acc) ->
++    receive
++        {ibrowse_async_response, ReqId, Data} ->
++            recv_body(ReqId, [Data | Acc]);
++        {ibrowse_async_response_end, ReqId} ->
++            iolist_to_binary(lists:reverse(Acc));
++        Else ->
++            throw({error, unexpected_mesg, Else})
++    after ?TIMEOUT ->
++        throw({error, timeout})
++    end.
+diff --git a/test/couchdb/couchdb_modules_load_tests.erl b/test/couchdb/couchdb_modules_load_tests.erl
+new file mode 100644
+index 0000000..4eaa42b
+--- /dev/null
++++ b/test/couchdb/couchdb_modules_load_tests.erl
+@@ -0,0 +1,68 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_modules_load_tests).
++
++-include("couch_eunit.hrl").
++
++
++modules_load_test_() ->
++    {
++        "Verify that all modules loads",
++        should_load_modules()
++    }.
++
++
++should_load_modules() ->
++    Modules = [
++        couch_auth_cache,
++        couch_btree,
++        couch_changes,
++        couch_compress,
++        couch_config,
++        couch_config_writer,
++        couch_db,
++        couch_db_update_notifier,
++        couch_db_update_notifier_sup,
++        couch_db_updater,
++        couch_doc,
++        % Fails unless couch_config gen_server is started.
++        % couch_ejson_compare,
++        couch_event_sup,
++        couch_external_manager,
++        couch_external_server,
++        couch_file,
++        couch_httpd,
++        couch_httpd_db,
++        couch_httpd_external,
++        couch_httpd_misc_handlers,
++        couch_httpd_rewrite,
++        couch_httpd_stats_handlers,
++        couch_key_tree,
++        couch_log,
++        couch_os_process,
++        couch_query_servers,
++        couch_ref_counter,
++        couch_server,
++        couch_server_sup,
++        couch_stats_aggregator,
++        couch_stats_collector,
++        couch_stream,
++        couch_task_status,
++        couch_util,
++        couch_work_queue,
++        json_stream_parse
++    ],
++    [should_load_module(Mod) || Mod <- Modules].
++
++should_load_module(Mod) ->
++    {atom_to_list(Mod), ?_assertMatch({module, _}, code:load_file(Mod))}.
+diff --git a/test/couchdb/couchdb_os_daemons_tests.erl b/test/couchdb/couchdb_os_daemons_tests.erl
+new file mode 100644
+index 0000000..ed9b6e8
+--- /dev/null
++++ b/test/couchdb/couchdb_os_daemons_tests.erl
+@@ -0,0 +1,329 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_os_daemons_tests).
++
++-include("couch_eunit.hrl").
++
++%% keep in sync with couchdb/couch_os_daemons.erl
++-record(daemon, {
++    port,
++    name,
++    cmd,
++    kill,
++    status=running,
++    cfg_patterns=[],
++    errors=[],
++    buf=[]
++}).
++
++-define(DAEMON_CONFIGER, "os_daemon_configer.escript").
++-define(DAEMON_LOOPER, "os_daemon_looper.escript").
++-define(DAEMON_BAD_PERM, "os_daemon_bad_perm.sh").
++-define(DAEMON_CAN_REBOOT, "os_daemon_can_reboot.sh").
++-define(DAEMON_DIE_ON_BOOT, "os_daemon_die_on_boot.sh").
++-define(DAEMON_DIE_QUICKLY, "os_daemon_die_quickly.sh").
++-define(DAEMON_CFGREG, "test_cfg_register").
++-define(DELAY, 100).
++-define(FIXTURES_BUILDDIR,
++        filename:join([?BUILDDIR, "test", "couchdb", "fixtures"])).
++-define(TIMEOUT, 1000).
++
++
++setup(DName) ->
++    {ok, CfgPid} = couch_config:start_link(?CONFIG_CHAIN),
++    {ok, OsDPid} = couch_os_daemons:start_link(),
++    Path = case DName of
++        ?DAEMON_CFGREG ->
++            filename:join([?FIXTURES_BUILDDIR, DName]);
++        ?DAEMON_CONFIGER ->
++            filename:join([?FIXTURES_BUILDDIR, DName]);
++        _ ->
++            filename:join([?FIXTURESDIR, DName])
++    end,
++    couch_config:set("os_daemons", DName, Path, false),
++    timer:sleep(?DELAY),  % sleep a bit to let daemon set kill flag
++    {CfgPid, OsDPid}.
++
++teardown(_, {CfgPid, OsDPid}) ->
++    erlang:monitor(process, CfgPid),
++    couch_config:stop(),
++    receive
++        {'DOWN', _, _, CfgPid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, config_stop})
++    end,
++
++    erlang:monitor(process, OsDPid),
++    exit(OsDPid, normal),
++    receive
++        {'DOWN', _, _, OsDPid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, os_daemon_stop})
++    end.
++
++
++os_daemons_test_() ->
++    {
++        "OS Daemons tests",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{?DAEMON_LOOPER, Fun} || Fun <- [
++                fun should_check_daemon/2,
++                fun should_check_daemon_table_form/2,
++                fun should_clean_tables_on_daemon_remove/2,
++                fun should_spawn_multiple_daemons/2,
++                fun should_keep_alive_one_daemon_on_killing_other/2
++            ]]
++        }
++    }.
++
++configuration_reader_test_() ->
++    {
++        "OS Daemon requests CouchDB configuration",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{?DAEMON_CONFIGER,
++              fun should_read_write_config_settings_by_daemon/2}]
++
++        }
++    }.
++
++error_test_() ->
++    {
++        "OS Daemon process error tests",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{?DAEMON_BAD_PERM, fun should_fail_due_to_lack_of_permissions/2},
++             {?DAEMON_DIE_ON_BOOT, fun should_die_on_boot/2},
++             {?DAEMON_DIE_QUICKLY, fun should_die_quickly/2},
++             {?DAEMON_CAN_REBOOT, fun should_not_being_halted/2}]
++        }
++    }.
++
++configuration_register_test_() ->
++    {
++        "OS daemon subscribed to config changes",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{?DAEMON_CFGREG, Fun} || Fun <- [
++                fun should_start_daemon/2,
++                fun should_restart_daemon_on_section_change/2,
++                fun should_not_restart_daemon_on_changing_ignored_section_key/2,
++                fun should_restart_daemon_on_section_key_change/2
++            ]]
++        }
++    }.
++
++
++should_check_daemon(DName, _) ->
++    ?_test(begin
++        {ok, [D]} = couch_os_daemons:info([table]),
++        check_daemon(D, DName)
++    end).
++
++should_check_daemon_table_form(DName, _) ->
++    ?_test(begin
++        {ok, Tab} = couch_os_daemons:info(),
++        [D] = ets:tab2list(Tab),
++        check_daemon(D, DName)
++    end).
++
++should_clean_tables_on_daemon_remove(DName, _) ->
++    ?_test(begin
++        couch_config:delete("os_daemons", DName, false),
++        {ok, Tab2} = couch_os_daemons:info(),
++        ?_assertEqual([], ets:tab2list(Tab2))
++    end).
++
++should_spawn_multiple_daemons(DName, _) ->
++    ?_test(begin
++        couch_config:set("os_daemons", "bar",
++                         filename:join([?FIXTURESDIR, DName]), false),
++        couch_config:set("os_daemons", "baz",
++                         filename:join([?FIXTURESDIR, DName]), false),
++        timer:sleep(?DELAY),
++        {ok, Daemons} = couch_os_daemons:info([table]),
++        lists:foreach(fun(D) ->
++            check_daemon(D)
++        end, Daemons),
++        {ok, Tab} = couch_os_daemons:info(),
++        lists:foreach(fun(D) ->
++            check_daemon(D)
++        end, ets:tab2list(Tab))
++    end).
++
++should_keep_alive_one_daemon_on_killing_other(DName, _) ->
++    ?_test(begin
++        couch_config:set("os_daemons", "bar",
++                         filename:join([?FIXTURESDIR, DName]), false),
++        timer:sleep(?DELAY),
++        {ok, Daemons} = couch_os_daemons:info([table]),
++        lists:foreach(fun(D) ->
++            check_daemon(D)
++        end, Daemons),
++
++        couch_config:delete("os_daemons", "bar", false),
++        timer:sleep(?DELAY),
++        {ok, [D2]} = couch_os_daemons:info([table]),
++        check_daemon(D2, DName),
++
++        {ok, Tab} = couch_os_daemons:info(),
++        [T] = ets:tab2list(Tab),
++        check_daemon(T, DName)
++    end).
++
++should_read_write_config_settings_by_daemon(DName, _) ->
++    ?_test(begin
++        % have to wait until the daemon has run all of its tests;
++        % see the daemon's script for more info
++        timer:sleep(?TIMEOUT),
++        {ok, [D]} = couch_os_daemons:info([table]),
++        check_daemon(D, DName)
++    end).
++
++should_fail_due_to_lack_of_permissions(DName, _) ->
++    ?_test(should_halts(DName, 1000)).
++
++should_die_on_boot(DName, _) ->
++    ?_test(should_halts(DName, 1000)).
++
++should_die_quickly(DName, _) ->
++    ?_test(should_halts(DName, 4000)).
++
++should_not_being_halted(DName, _) ->
++    ?_test(begin
++        timer:sleep(1000),
++        {ok, [D1]} = couch_os_daemons:info([table]),
++        check_daemon(D1, DName, running, 0),
++
++        % Should reboot every two seconds. We're at 1s, so wait
++        % until 3s to be in the middle of the next invocation's
++        % life span.
++
++        timer:sleep(2000),
++        {ok, [D2]} = couch_os_daemons:info([table]),
++        check_daemon(D2, DName, running, 1),
++
++        % If the kill command changed, that means we rebooted the process.
++        ?assertNotEqual(D1#daemon.kill, D2#daemon.kill)
++    end).
++
++should_halts(DName, Time) ->
++    timer:sleep(Time),
++    {ok, [D]} = couch_os_daemons:info([table]),
++    check_dead(D, DName),
++    couch_config:delete("os_daemons", DName, false).
++
++should_start_daemon(DName, _) ->
++    ?_test(begin
++        wait_for_start(10),
++        {ok, [D]} = couch_os_daemons:info([table]),
++        check_daemon(D, DName, running, 0, [{"s1"}, {"s2", "k"}])
++    end).
++
++should_restart_daemon_on_section_change(DName, _) ->
++    ?_test(begin
++        wait_for_start(10),
++        {ok, [D1]} = couch_os_daemons:info([table]),
++        couch_config:set("s1", "k", "foo", false),
++        wait_for_restart(10),
++        {ok, [D2]} = couch_os_daemons:info([table]),
++        check_daemon(D2, DName, running, 0, [{"s1"}, {"s2", "k"}]),
++        ?assertNotEqual(D1, D2)
++    end).
++
++should_not_restart_daemon_on_changing_ignored_section_key(_, _) ->
++    ?_test(begin
++        wait_for_start(10),
++        {ok, [D1]} = couch_os_daemons:info([table]),
++        couch_config:set("s2", "k2", "baz", false),
++        timer:sleep(?DELAY),
++        {ok, [D2]} = couch_os_daemons:info([table]),
++        ?assertEqual(D1, D2)
++    end).
++
++should_restart_daemon_on_section_key_change(DName, _) ->
++    ?_test(begin
++        wait_for_start(10),
++        {ok, [D1]} = couch_os_daemons:info([table]),
++        couch_config:set("s2", "k", "bingo", false),
++        wait_for_restart(10),
++        {ok, [D2]} = couch_os_daemons:info([table]),
++        check_daemon(D2, DName, running, 0, [{"s1"}, {"s2", "k"}]),
++        ?assertNotEqual(D1, D2)
++    end).
++
++
++wait_for_start(0) ->
++    erlang:error({assertion_failed,
++                  [{module, ?MODULE},
++                   {line, ?LINE},
++                   {reason, "Timeout on waiting daemon for start"}]});
++wait_for_start(N) ->
++    case couch_os_daemons:info([table]) of
++        {ok, []} ->
++            timer:sleep(?DELAY),
++            wait_for_start(N - 1);
++        _ ->
++            timer:sleep(?TIMEOUT)
++    end.
++
++wait_for_restart(0) ->
++    erlang:error({assertion_failed,
++                  [{module, ?MODULE},
++                   {line, ?LINE},
++                   {reason, "Timeout on waiting daemon for restart"}]});
++wait_for_restart(N) ->
++    {ok, [D]} = couch_os_daemons:info([table]),
++    case D#daemon.status of
++        restarting ->
++            timer:sleep(?DELAY),
++            wait_for_restart(N - 1);
++        _ ->
++            timer:sleep(?TIMEOUT)
++    end.
++
++check_daemon(D) ->
++    check_daemon(D, D#daemon.name).
++
++check_daemon(D, Name) ->
++    check_daemon(D, Name, running).
++
++check_daemon(D, Name, Status) ->
++    check_daemon(D, Name, Status, 0).
++
++check_daemon(D, Name, Status, Errs) ->
++    check_daemon(D, Name, Status, Errs, []).
++
++check_daemon(D, Name, Status, Errs, CfgPatterns) ->
++    ?assert(is_port(D#daemon.port)),
++    ?assertEqual(Name, D#daemon.name),
++    ?assertNotEqual(undefined, D#daemon.kill),
++    ?assertEqual(Status, D#daemon.status),
++    ?assertEqual(CfgPatterns, D#daemon.cfg_patterns),
++    ?assertEqual(Errs, length(D#daemon.errors)),
++    ?assertEqual([], D#daemon.buf).
++
++check_dead(D, Name) ->
++    ?assert(is_port(D#daemon.port)),
++    ?assertEqual(Name, D#daemon.name),
++    ?assertNotEqual(undefined, D#daemon.kill),
++    ?assertEqual(halted, D#daemon.status),
++    ?assertEqual(nil, D#daemon.errors),
++    ?assertEqual(nil, D#daemon.buf).
+diff --git a/test/couchdb/couchdb_os_proc_pool.erl b/test/couchdb/couchdb_os_proc_pool.erl
+new file mode 100644
+index 0000000..1bb266e
+--- /dev/null
++++ b/test/couchdb/couchdb_os_proc_pool.erl
+@@ -0,0 +1,179 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_os_proc_pool).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(TIMEOUT, 3000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    couch_config:set("query_server_config", "os_process_limit", "3", false),
++    Pid.
++
++stop(Pid) ->
++    couch_server_sup:stop(),
++    erlang:monitor(process, Pid),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++
++os_proc_pool_test_() ->
++    {
++        "OS processes pool tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            [
++                should_block_new_proc_on_full_pool(),
++                should_free_slot_on_proc_unexpected_exit()
++            ]
++        }
++    }.
++
++
++should_block_new_proc_on_full_pool() ->
++    ?_test(begin
++        Client1 = spawn_client(),
++        Client2 = spawn_client(),
++        Client3 = spawn_client(),
++
++        ?assertEqual(ok, ping_client(Client1)),
++        ?assertEqual(ok, ping_client(Client2)),
++        ?assertEqual(ok, ping_client(Client3)),
++
++        Proc1 = get_client_proc(Client1, "1"),
++        Proc2 = get_client_proc(Client2, "2"),
++        Proc3 = get_client_proc(Client3, "3"),
++
++        ?assertNotEqual(Proc1, Proc2),
++        ?assertNotEqual(Proc2, Proc3),
++        ?assertNotEqual(Proc3, Proc1),
++
++        Client4 = spawn_client(),
++        ?assertEqual(timeout, ping_client(Client4)),
++
++        ?assertEqual(ok, stop_client(Client1)),
++        ?assertEqual(ok, ping_client(Client4)),
++
++        Proc4 = get_client_proc(Client4, "4"),
++        ?assertEqual(Proc1, Proc4),
++
++        lists:map(fun(C) ->
++            ?assertEqual(ok, stop_client(C))
++        end, [Client2, Client3, Client4])
++    end).
++
++should_free_slot_on_proc_unexpected_exit() ->
++    ?_test(begin
++        Client1 = spawn_client(),
++        Client2 = spawn_client(),
++        Client3 = spawn_client(),
++
++        ?assertEqual(ok, ping_client(Client1)),
++        ?assertEqual(ok, ping_client(Client2)),
++        ?assertEqual(ok, ping_client(Client3)),
++
++        Proc1 = get_client_proc(Client1, "1"),
++        Proc2 = get_client_proc(Client2, "2"),
++        Proc3 = get_client_proc(Client3, "3"),
++
++        ?assertNotEqual(Proc1, Proc2),
++        ?assertNotEqual(Proc2, Proc3),
++        ?assertNotEqual(Proc3, Proc1),
++
++        ?assertEqual(ok, kill_client(Client1)),
++
++        Client4 = spawn_client(),
++        ?assertEqual(ok, ping_client(Client4)),
++
++        Proc4 = get_client_proc(Client4, "4"),
++        ?assertNotEqual(Proc4, Proc1),
++        ?assertNotEqual(Proc2, Proc4),
++        ?assertNotEqual(Proc3, Proc4),
++
++        lists:map(fun(C) ->
++            ?assertEqual(ok, stop_client(C))
++        end, [Client2, Client3, Client4])
++    end).
++
++
++spawn_client() ->
++    Parent = self(),
++    Ref = make_ref(),
++    Pid = spawn(fun() ->
++        Proc = couch_query_servers:get_os_process(<<"javascript">>),
++        loop(Parent, Ref, Proc)
++    end),
++    {Pid, Ref}.
++
++ping_client({Pid, Ref}) ->
++    Pid ! ping,
++    receive
++        {pong, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++get_client_proc({Pid, Ref}, ClientName) ->
++    Pid ! get_proc,
++    receive
++        {proc, Ref, Proc} -> Proc
++    after ?TIMEOUT ->
++        erlang:error({assertion_failed,
++                     [{module, ?MODULE},
++                      {line, ?LINE},
++                      {reason, "Timeout getting client "
++                               ++ ClientName ++ " proc"}]})
++    end.
++
++stop_client({Pid, Ref}) ->
++    Pid ! stop,
++    receive
++        {stop, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++kill_client({Pid, Ref}) ->
++    Pid ! die,
++    receive
++        {die, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++loop(Parent, Ref, Proc) ->
++    receive
++        ping ->
++            Parent ! {pong, Ref},
++            loop(Parent, Ref, Proc);
++        get_proc  ->
++            Parent ! {proc, Ref, Proc},
++            loop(Parent, Ref, Proc);
++        stop ->
++            couch_query_servers:ret_os_process(Proc),
++            Parent ! {stop, Ref};
++        die ->
++            Parent ! {die, Ref},
++            exit(some_error)
++    end.
+diff --git a/test/couchdb/couchdb_update_conflicts_tests.erl b/test/couchdb/couchdb_update_conflicts_tests.erl
+new file mode 100644
+index 0000000..7226860
+--- /dev/null
++++ b/test/couchdb/couchdb_update_conflicts_tests.erl
+@@ -0,0 +1,243 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_update_conflicts_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(i2l(I), integer_to_list(I)).
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(DOC_ID, <<"foobar">>).
++-define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]).
++-define(TIMEOUT, 10000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    couch_config:set("couchdb", "delayed_commits", "true", false),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER, overwrite]),
++    Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID},
++                                    {<<"value">>, 0}]}),
++    {ok, Rev} = couch_db:update_doc(Db, Doc, []),
++    ok = couch_db:close(Db),
++    RevStr = couch_doc:rev_to_str(Rev),
++    {DbName, RevStr}.
++setup(_) ->
++    setup().
++
++teardown({DbName, _}) ->
++    ok = couch_server:delete(DbName, []),
++    ok.
++teardown(_, {DbName, _RevStr}) ->
++    teardown({DbName, _RevStr}).
++
++
++update_conflicts_test_() ->
++    {
++        "Update conflicts",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            [
++                concurrent_updates(),
++                couchdb_188()
++            ]
++        }
++    }.
++
++concurrent_updates()->
++    {
++        "Concurrent updates",
++        {
++            foreachx,
++            fun setup/1, fun teardown/2,
++            [{NumClients, fun should_concurrently_update_doc/2}
++             || NumClients <- ?NUM_CLIENTS]
++        }
++    }.
++
++couchdb_188()->
++    {
++        "COUCHDB-188",
++        {
++            foreach,
++            fun setup/0, fun teardown/1,
++            [fun should_bulk_create_delete_doc/1]
++        }
++    }.
++
++
++should_concurrently_update_doc(NumClients, {DbName, InitRev})->
++     {?i2l(NumClients) ++ " clients",
++      {inorder,
++       [{"update doc",
++         {timeout, ?TIMEOUT div 1000,
++          ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
++        {"ensure in single leaf",
++         ?_test(ensure_in_single_revision_leaf(DbName))}]}}.
++
++should_bulk_create_delete_doc({DbName, InitRev})->
++    ?_test(bulk_delete_create(DbName, InitRev)).
++
++
++concurrent_doc_update(NumClients, DbName, InitRev) ->
++    Clients = lists:map(
++        fun(Value) ->
++            ClientDoc = couch_doc:from_json_obj({[
++                {<<"_id">>, ?DOC_ID},
++                {<<"_rev">>, InitRev},
++                {<<"value">>, Value}
++            ]}),
++            Pid = spawn_client(DbName, ClientDoc),
++            {Value, Pid, erlang:monitor(process, Pid)}
++        end,
++        lists:seq(1, NumClients)),
++
++    lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
++
++    {NumConflicts, SavedValue} = lists:foldl(
++        fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) ->
++            receive
++                {'DOWN', MonRef, process, Pid, {ok, _NewRev}} ->
++                    {AccConflicts, Value};
++                {'DOWN', MonRef, process, Pid, conflict} ->
++                    {AccConflicts + 1, AccValue};
++                {'DOWN', MonRef, process, Pid, Error} ->
++                    erlang:error({assertion_failed,
++                         [{module, ?MODULE},
++                          {line, ?LINE},
++                          {reason, "Client " ++ ?i2l(Value)
++                                             ++ " got update error: "
++                                             ++ couch_util:to_list(Error)}]})
++            after ?TIMEOUT div 2 ->
++                 erlang:error({assertion_failed,
++                         [{module, ?MODULE},
++                          {line, ?LINE},
++                          {reason, "Timeout waiting for client "
++                                   ++ ?i2l(Value) ++ " to die"}]})
++            end
++        end, {0, nil}, Clients),
++    ?assertEqual(NumClients - 1, NumConflicts),
++
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
++    ok = couch_db:close(Db),
++    ?assertEqual(1, length(Leaves)),
++
++    [{ok, Doc2}] = Leaves,
++    {JsonDoc} = couch_doc:to_json_obj(Doc2, []),
++    ?assertEqual(SavedValue, couch_util:get_value(<<"value">>, JsonDoc)).
++
++ensure_in_single_revision_leaf(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
++    ok = couch_db:close(Db),
++    [{ok, Doc}] = Leaves,
++
++    %% FIXME: server restart won't work from test side
++    %% stop(ok),
++    %% start(),
++
++    {ok, Db2} = couch_db:open_int(DbName, []),
++    {ok, Leaves2} = couch_db:open_doc_revs(Db2, ?DOC_ID, all, []),
++    ok = couch_db:close(Db2),
++    ?assertEqual(1, length(Leaves2)),
++
++    [{ok, Doc2}] = Leaves2,
++    ?assertEqual(Doc, Doc2).
++
++bulk_delete_create(DbName, InitRev) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++
++    DeletedDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, ?DOC_ID},
++        {<<"_rev">>, InitRev},
++        {<<"_deleted">>, true}
++    ]}),
++    NewDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, ?DOC_ID},
++        {<<"value">>, 666}
++    ]}),
++
++    {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
++    ok = couch_db:close(Db),
++
++    ?assertEqual(2, length([ok || {ok, _} <- Results])),
++    [{ok, Rev1}, {ok, Rev2}] = Results,
++
++    {ok, Db2} = couch_db:open_int(DbName, []),
++    {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
++        Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
++    {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
++        Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]),
++    ok = couch_db:close(Db2),
++
++    {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
++    {Doc2Props} = couch_doc:to_json_obj(Doc2, []),
++
++    %% Document was deleted
++    ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
++    %% New document not flagged as deleted
++    ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>,
++                                                 Doc2Props)),
++    %% New leaf revision has the right value
++    ?assertEqual(666, couch_util:get_value(<<"value">>,
++                                           Doc2Props)),
++    %% Deleted document has no conflicts
++    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
++                                                 Doc1Props)),
++    %% Deleted document has no deleted conflicts
++    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
++                                                 Doc1Props)),
++    %% New leaf revision doesn't have conflicts
++    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
++                                                 Doc2Props)),
++    %% New leaf revision doesn't have deleted conflicts
++    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
++                                                 Doc2Props)),
++
++    %% Deleted revision has position 2
++    ?assertEqual(2, element(1, Rev1)),
++    %% New leaf revision has position 1
++    ?assertEqual(1, element(1, Rev2)).
++
++
++spawn_client(DbName, Doc) ->
++    spawn(fun() ->
++        {ok, Db} = couch_db:open_int(DbName, []),
++        receive
++            go -> ok
++        end,
++        erlang:yield(),
++        Result = try
++            couch_db:update_doc(Db, Doc, [])
++        catch _:Error ->
++            Error
++        end,
++        ok = couch_db:close(Db),
++        exit(Result)
++    end).
+diff --git a/test/couchdb/couchdb_vhosts_tests.erl b/test/couchdb/couchdb_vhosts_tests.erl
+new file mode 100644
+index 0000000..94b1957
+--- /dev/null
++++ b/test/couchdb/couchdb_vhosts_tests.erl
+@@ -0,0 +1,441 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_vhosts_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(TIMEOUT, 1000).
++-define(iofmt(S, A), lists:flatten(io_lib:format(S, A))).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    Doc = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"doc1">>},
++        {<<"value">>, 666}
++    ]}),
++
++    Doc1 = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"_design/doc1">>},
++        {<<"shows">>, {[
++            {<<"test">>, <<"function(doc, req) {
++            return { json: {
++                    requested_path: '/' + req.requested_path.join('/'),
++                    path: '/' + req.path.join('/')}};}">>}
++        ]}},
++        {<<"rewrites">>, [
++            {[
++                {<<"from">>, <<"/">>},
++                {<<"to">>, <<"_show/test">>}
++            ]}
++        ]}
++    ]}),
++    {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
++    couch_db:ensure_full_commit(Db),
++    couch_db:close(Db),
++
++    Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
++    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++    Url = "http://" ++ Addr ++ ":" ++ Port,
++    {Url, ?b2l(DbName)}.
++
++setup_oauth() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++
++    couch_config:set("couch_httpd_auth", "authentication_db",
++                     ?b2l(?tempdb()), false),
++    couch_config:set("oauth_token_users", "otoksec1", "joe", false),
++    couch_config:set("oauth_consumer_secrets", "consec1", "foo", false),
++    couch_config:set("oauth_token_secrets", "otoksec1", "foobar", false),
++    couch_config:set("couch_httpd_auth", "require_valid_user", "true", false),
++
++    ok = couch_config:set(
++        "vhosts", "oauth-example.com",
++        "/" ++ ?b2l(DbName) ++ "/_design/test/_rewrite/foobar", false),
++
++    DDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"_design/test">>},
++        {<<"language">>, <<"javascript">>},
++        {<<"rewrites">>, [
++            {[
++                {<<"from">>, <<"foobar">>},
++                {<<"to">>, <<"_info">>}
++            ]}
++        ]}
++    ]}),
++    {ok, _} = couch_db:update_doc(Db, DDoc, []),
++
++    couch_db:ensure_full_commit(Db),
++    couch_db:close(Db),
++
++    Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
++    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++    Url = "http://" ++ Addr ++ ":" ++ Port,
++    {Url, ?b2l(DbName)}.
++
++teardown({_, DbName}) ->
++    ok = couch_server:delete(?l2b(DbName), []),
++    ok.
++
++
++vhosts_test_() ->
++    {
++        "Virtual Hosts rewrite tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_return_database_info/1,
++                    fun should_return_revs_info/1,
++                    fun should_serve_utils_for_vhost/1,
++                    fun should_return_virtual_request_path_field_in_request/1,
++                    fun should_return_real_request_path_field_in_request/1,
++                    fun should_match_wildcard_vhost/1,
++                    fun should_return_db_info_for_wildcard_vhost_for_custom_db/1,
++                    fun should_replace_rewrite_variables_for_db_and_doc/1,
++                    fun should_return_db_info_for_vhost_with_resource/1,
++                    fun should_return_revs_info_for_vhost_with_resource/1,
++                    fun should_return_db_info_for_vhost_with_wildcard_resource/1,
++                    fun should_return_path_for_vhost_with_wildcard_host/1
++                ]
++            }
++        }
++    }.
++
++oauth_test_() ->
++    {
++        "Virtual Hosts OAuth tests",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup_oauth/0, fun teardown/1,
++                [
++                    fun should_require_auth/1,
++                    fun should_succeed_oauth/1,
++                    fun should_fail_oauth_with_wrong_credentials/1
++                ]
++            }
++        }
++    }.
++
++
++should_return_database_info({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "example.com", "/" ++ DbName, false),
++        case test_request:get(Url, [], [{host_header, "example.com"}]) of
++            {ok, _, _, Body} ->
++                {JsonBody} = ejson:decode(Body),
++                ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_return_revs_info({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "example.com", "/" ++ DbName, false),
++        case test_request:get(Url ++ "/doc1?revs_info=true", [],
++                              [{host_header, "example.com"}]) of
++            {ok, _, _, Body} ->
++                {JsonBody} = ejson:decode(Body),
++                ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_serve_utils_for_vhost({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "example.com", "/" ++ DbName, false),
++        case test_request:get(Url ++ "/_utils/index.html", [],
++                              [{host_header, "example.com"}]) of
++            {ok, _, _, Body} ->
++                ?assertMatch(<<"<!DOCTYPE html>", _/binary>>, Body);
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_return_virtual_request_path_field_in_request({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "example1.com",
++                              "/" ++ DbName ++ "/_design/doc1/_rewrite/",
++                              false),
++        case test_request:get(Url, [], [{host_header, "example1.com"}]) of
++            {ok, _, _, Body} ->
++                {Json} = ejson:decode(Body),
++                ?assertEqual(<<"/">>,
++                             proplists:get_value(<<"requested_path">>, Json));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_return_real_request_path_field_in_request({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "example1.com",
++                              "/" ++ DbName ++ "/_design/doc1/_rewrite/",
++                              false),
++        case test_request:get(Url, [], [{host_header, "example1.com"}]) of
++            {ok, _, _, Body} ->
++                {Json} = ejson:decode(Body),
++                Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
++                ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_match_wildcard_vhost({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "*.example.com",
++                              "/" ++ DbName ++ "/_design/doc1/_rewrite", false),
++        case test_request:get(Url, [], [{host_header, "test.example.com"}]) of
++            {ok, _, _, Body} ->
++                {Json} = ejson:decode(Body),
++                Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
++                ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", ":dbname.example1.com",
++                              "/:dbname", false),
++        Host = DbName ++ ".example1.com",
++        case test_request:get(Url, [], [{host_header, Host}]) of
++            {ok, _, _, Body} ->
++                {JsonBody} = ejson:decode(Body),
++                ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts",":appname.:dbname.example1.com",
++                              "/:dbname/_design/:appname/_rewrite/", false),
++        Host = "doc1." ++ DbName ++ ".example1.com",
++        case test_request:get(Url, [], [{host_header, Host}]) of
++            {ok, _, _, Body} ->
++                {Json} = ejson:decode(Body),
++                Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
++                ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_return_db_info_for_vhost_with_resource({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts",
++                              "example.com/test", "/" ++ DbName, false),
++        ReqUrl = Url ++ "/test",
++        case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
++            {ok, _, _, Body} ->
++                {JsonBody} = ejson:decode(Body),
++                ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++
++should_return_revs_info_for_vhost_with_resource({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts",
++                              "example.com/test", "/" ++ DbName, false),
++        ReqUrl = Url ++ "/test/doc1?revs_info=true",
++        case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
++            {ok, _, _, Body} ->
++                {JsonBody} = ejson:decode(Body),
++                ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "*.example2.com/test", "/*", false),
++        ReqUrl = Url ++ "/test",
++        Host = DbName ++ ".example2.com",
++        case test_request:get(ReqUrl, [], [{host_header, Host}]) of
++            {ok, _, _, Body} ->
++                {JsonBody} = ejson:decode(Body),
++                ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
++    ?_test(begin
++        ok = couch_config:set("vhosts", "*/test1",
++                              "/" ++ DbName ++ "/_design/doc1/_show/test",
++                              false),
++        case test_request:get(Url ++ "/test1") of
++            {ok, _, _, Body} ->
++                {Json} = ejson:decode(Body),
++                Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
++                ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_require_auth({Url, _}) ->
++    ?_test(begin
++        case test_request:get(Url, [], [{host_header, "oauth-example.com"}]) of
++            {ok, Code, _, Body} ->
++                ?assertEqual(401, Code),
++                {JsonBody} = ejson:decode(Body),
++                ?assertEqual(<<"unauthorized">>,
++                             couch_util:get_value(<<"error">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_succeed_oauth({Url, _}) ->
++    ?_test(begin
++        AuthDbName = couch_config:get("couch_httpd_auth", "authentication_db"),
++        JoeDoc = couch_doc:from_json_obj({[
++            {<<"_id">>, <<"org.couchdb.user:joe">>},
++            {<<"type">>, <<"user">>},
++            {<<"name">>, <<"joe">>},
++            {<<"roles">>, []},
++            {<<"password_sha">>, <<"fe95df1ca59a9b567bdca5cbaf8412abd6e06121">>},
++            {<<"salt">>, <<"4e170ffeb6f34daecfd814dfb4001a73">>}
++        ]}),
++        {ok, AuthDb} = couch_db:open_int(?l2b(AuthDbName), [?ADMIN_USER]),
++        {ok, _} = couch_db:update_doc(AuthDb, JoeDoc, [?ADMIN_USER]),
++
++        Host = "oauth-example.com",
++        Consumer = {"consec1", "foo", hmac_sha1},
++        SignedParams = oauth:sign(
++            "GET", "http://" ++ Host ++ "/", [], Consumer, "otoksec1", "foobar"),
++        OAuthUrl = oauth:uri(Url, SignedParams),
++
++        case test_request:get(OAuthUrl, [], [{host_header, Host}]) of
++            {ok, Code, _, Body} ->
++                ?assertEqual(200, Code),
++                {JsonBody} = ejson:decode(Body),
++                ?assertEqual(<<"test">>,
++                             couch_util:get_value(<<"name">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
++
++should_fail_oauth_with_wrong_credentials({Url, _}) ->
++    ?_test(begin
++        AuthDbName = couch_config:get("couch_httpd_auth", "authentication_db"),
++        JoeDoc = couch_doc:from_json_obj({[
++            {<<"_id">>, <<"org.couchdb.user:joe">>},
++            {<<"type">>, <<"user">>},
++            {<<"name">>, <<"joe">>},
++            {<<"roles">>, []},
++            {<<"password_sha">>, <<"fe95df1ca59a9b567bdca5cbaf8412abd6e06121">>},
++            {<<"salt">>, <<"4e170ffeb6f34daecfd814dfb4001a73">>}
++        ]}),
++        {ok, AuthDb} = couch_db:open_int(?l2b(AuthDbName), [?ADMIN_USER]),
++        {ok, _} = couch_db:update_doc(AuthDb, JoeDoc, [?ADMIN_USER]),
++
++        Host = "oauth-example.com",
++        Consumer = {"consec1", "bad_secret", hmac_sha1},
++        SignedParams = oauth:sign(
++            "GET", "http://" ++ Host ++ "/", [], Consumer, "otoksec1", "foobar"),
++        OAuthUrl = oauth:uri(Url, SignedParams),
++
++        case test_request:get(OAuthUrl, [], [{host_header, Host}]) of
++            {ok, Code, _, Body} ->
++                ?assertEqual(401, Code),
++                {JsonBody} = ejson:decode(Body),
++                ?assertEqual(<<"unauthorized">>,
++                             couch_util:get_value(<<"error">>, JsonBody));
++            Else ->
++                erlang:error({assertion_failed,
++                             [{module, ?MODULE},
++                              {line, ?LINE},
++                              {reason, ?iofmt("Request failed: ~p", [Else])}]})
++        end
++    end).
+diff --git a/test/couchdb/couchdb_views_tests.erl b/test/couchdb/couchdb_views_tests.erl
+new file mode 100644
+index 0000000..c61f783
+--- /dev/null
++++ b/test/couchdb/couchdb_views_tests.erl
+@@ -0,0 +1,669 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(couchdb_views_tests).
++
++-include("couch_eunit.hrl").
++-include_lib("couchdb/couch_db.hrl").
++-include_lib("couch_mrview/include/couch_mrview.hrl").
++
++-define(ADMIN_USER, {user_ctx, #user_ctx{roles=[<<"_admin">>]}}).
++-define(DELAY, 100).
++-define(TIMEOUT, 1000).
++
++
++start() ->
++    {ok, Pid} = couch_server_sup:start_link(?CONFIG_CHAIN),
++    Pid.
++
++stop(Pid) ->
++    erlang:monitor(process, Pid),
++    couch_server_sup:stop(),
++    receive
++        {'DOWN', _, _, Pid, _} ->
++            ok
++    after ?TIMEOUT ->
++        throw({timeout, server_stop})
++    end.
++
++setup() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(Db),
++    FooRev = create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
++    query_view(DbName, "foo", "bar"),
++    BooRev = create_design_doc(DbName, <<"_design/boo">>, <<"baz">>),
++    query_view(DbName, "boo", "baz"),
++    {DbName, {FooRev, BooRev}}.
++
++setup_with_docs() ->
++    DbName = ?tempdb(),
++    {ok, Db} = couch_db:create(DbName, [?ADMIN_USER]),
++    ok = couch_db:close(Db),
++    create_docs(DbName),
++    create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
++    DbName.
++
++teardown({DbName, _}) ->
++    teardown(DbName);
++teardown(DbName) when is_binary(DbName) ->
++    couch_server:delete(DbName, [?ADMIN_USER]),
++    ok.
++
++
++view_indexes_cleanup_test_() ->
++    {
++        "View indexes cleanup",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup/0, fun teardown/1,
++                [
++                    fun should_have_two_indexes_alive_before_deletion/1,
++                    fun should_cleanup_index_file_after_ddoc_deletion/1,
++                    fun should_cleanup_all_index_files/1
++                ]
++            }
++        }
++    }.
++
++view_group_db_leaks_test_() ->
++    {
++        "View group db leaks",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            {
++                foreach,
++                fun setup_with_docs/0, fun teardown/1,
++                [
++                    fun couchdb_1138/1,
++                    fun couchdb_1309/1
++                ]
++            }
++        }
++    }.
++
++view_group_shutdown_test_() ->
++    {
++        "View group shutdown",
++        {
++            setup,
++            fun start/0, fun stop/1,
++            [couchdb_1283()]
++        }
++    }.
++
++
++should_not_remember_docs_in_index_after_backup_restore_test() ->
++    %% COUCHDB-640
++    start(),
++    DbName = setup_with_docs(),
++
++    ok = backup_db_file(DbName),
++    create_doc(DbName, "doc666"),
++
++    Rows0 = query_view(DbName, "foo", "bar"),
++    ?assert(has_doc("doc1", Rows0)),
++    ?assert(has_doc("doc2", Rows0)),
++    ?assert(has_doc("doc3", Rows0)),
++    ?assert(has_doc("doc666", Rows0)),
++
++    restore_backup_db_file(DbName),
++
++    Rows1 = query_view(DbName, "foo", "bar"),
++    ?assert(has_doc("doc1", Rows1)),
++    ?assert(has_doc("doc2", Rows1)),
++    ?assert(has_doc("doc3", Rows1)),
++    ?assertNot(has_doc("doc666", Rows1)),
++
++    teardown(DbName),
++    stop(whereis(couch_server_sup)).
++
++
++should_upgrade_legacy_view_files_test() ->
++    start(),
++
++    ok = couch_config:set("query_server_config", "commit_freq", "0", false),
++
++    DbName = <<"test">>,
++    DbFileName = "test.couch",
++    DbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
++    OldViewName = "3b835456c235b1827e012e25666152f3.view",
++    FixtureViewFilePath = filename:join([?FIXTURESDIR, OldViewName]),
++    NewViewName = "a1c5929f912aca32f13446122cc6ce50.view",
++
++    DbDir = couch_config:get("couchdb", "database_dir"),
++    ViewDir = couch_config:get("couchdb", "view_index_dir"),
++    OldViewFilePath = filename:join([ViewDir, ".test_design", OldViewName]),
++    NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
++                                     NewViewName]),
++
++    % cleanup
++    Files = [
++        filename:join([DbDir, DbFileName]),
++        OldViewFilePath,
++        NewViewFilePath
++    ],
++    lists:foreach(fun(File) -> file:delete(File) end, Files),
++
++    % copy old db file into db dir
++    {ok, _} = file:copy(DbFilePath, filename:join([DbDir, DbFileName])),
++
++    % copy old view file into view dir
++    ok = filelib:ensure_dir(filename:join([ViewDir, ".test_design"]) ++ "/"),
++    {ok, _} = file:copy(FixtureViewFilePath, OldViewFilePath),
++
++    % ensure old header
++    OldHeader = read_header(OldViewFilePath),
++    ?assertMatch(#index_header{}, OldHeader),
++
++    % query view for expected results
++    Rows0 = query_view(DbName, "test", "test"),
++    ?assertEqual(2, length(Rows0)),
++
++    % ensure old file gone
++    ?assertNot(filelib:is_regular(OldViewFilePath)),
++
++    % add doc to trigger update
++    DocUrl = db_url(DbName) ++ "/boo",
++    {ok, _, _, _} = test_request:put(
++        DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":3}">>),
++
++    % query view for expected results
++    Rows1 = query_view(DbName, "test", "test"),
++    ?assertEqual(3, length(Rows1)),
++
++    % ensure new header
++    timer:sleep(2000),  % have to wait a while for the index upgrade to complete
++    NewHeader = read_header(NewViewFilePath),
++    ?assertMatch(#mrheader{}, NewHeader),
++
++    teardown(DbName),
++    stop(whereis(couch_server_sup)).
++
++
++should_have_two_indexes_alive_before_deletion({DbName, _}) ->
++    view_cleanup(DbName),
++    ?_assertEqual(2, count_index_files(DbName)).
++
++should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
++    delete_design_doc(DbName, <<"_design/foo">>, FooRev),
++    view_cleanup(DbName),
++    ?_assertEqual(1, count_index_files(DbName)).
++
++should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
++    delete_design_doc(DbName, <<"_design/foo">>, FooRev),
++    delete_design_doc(DbName, <<"_design/boo">>, BooRev),
++    view_cleanup(DbName),
++    ?_assertEqual(0, count_index_files(DbName)).
++
++couchdb_1138(DbName) ->
++    ?_test(begin
++        {ok, IndexerPid} = couch_index_server:get_index(
++            couch_mrview_index, DbName, <<"_design/foo">>),
++        ?assert(is_pid(IndexerPid)),
++        ?assert(is_process_alive(IndexerPid)),
++        ?assertEqual(2, count_db_refs(DbName)),
++
++        Rows0 = query_view(DbName, "foo", "bar"),
++        ?assertEqual(3, length(Rows0)),
++        ?assertEqual(2, count_db_refs(DbName)),
++        ?assert(is_process_alive(IndexerPid)),
++
++        create_doc(DbName, "doc1000"),
++        Rows1 = query_view(DbName, "foo", "bar"),
++        ?assertEqual(4, length(Rows1)),
++        ?assertEqual(2, count_db_refs(DbName)),
++        ?assert(is_process_alive(IndexerPid)),
++
++        Ref1 = get_db_ref_counter(DbName),
++        compact_db(DbName),
++        Ref2 = get_db_ref_counter(DbName),
++        ?assertEqual(2, couch_ref_counter:count(Ref2)),
++        ?assertNotEqual(Ref2, Ref1),
++        ?assertNot(is_process_alive(Ref1)),
++        ?assert(is_process_alive(IndexerPid)),
++
++        compact_view_group(DbName, "foo"),
++        ?assertEqual(2, count_db_refs(DbName)),
++        Ref3 = get_db_ref_counter(DbName),
++        ?assertEqual(Ref3, Ref2),
++        ?assert(is_process_alive(IndexerPid)),
++
++        create_doc(DbName, "doc1001"),
++        Rows2 = query_view(DbName, "foo", "bar"),
++        ?assertEqual(5, length(Rows2)),
++        ?assertEqual(2, count_db_refs(DbName)),
++        ?assert(is_process_alive(IndexerPid))
++    end).
++
++couchdb_1309(DbName) ->
++    ?_test(begin
++        {ok, IndexerPid} = couch_index_server:get_index(
++            couch_mrview_index, DbName, <<"_design/foo">>),
++        ?assert(is_pid(IndexerPid)),
++        ?assert(is_process_alive(IndexerPid)),
++        ?assertEqual(2, count_db_refs(DbName)),
++
++        create_doc(DbName, "doc1001"),
++        Rows0 = query_view(DbName, "foo", "bar"),
++        check_rows_value(Rows0, null),
++        ?assertEqual(4, length(Rows0)),
++        ?assertEqual(2, count_db_refs(DbName)),
++        ?assert(is_process_alive(IndexerPid)),
++
++        update_design_doc(DbName,  <<"_design/foo">>, <<"bar">>),
++        {ok, NewIndexerPid} = couch_index_server:get_index(
++            couch_mrview_index, DbName, <<"_design/foo">>),
++        ?assert(is_pid(NewIndexerPid)),
++        ?assert(is_process_alive(NewIndexerPid)),
++        ?assertNotEqual(IndexerPid, NewIndexerPid),
++        ?assertEqual(2, count_db_refs(DbName)),
++
++        Rows1 = query_view(DbName, "foo", "bar", ok),
++        ?assertEqual(0, length(Rows1)),
++        Rows2 = query_view(DbName, "foo", "bar"),
++        check_rows_value(Rows2, 1),
++        ?assertEqual(4, length(Rows2)),
++
++        MonRef0 = erlang:monitor(process, IndexerPid),
++        receive
++            {'DOWN', MonRef0, _, _, _} ->
++                ok
++        after ?TIMEOUT ->
++            erlang:error(
++                {assertion_failed,
++                 [{module, ?MODULE}, {line, ?LINE},
++                  {reason, "old view group is not dead after ddoc update"}]})
++        end,
++
++        MonRef1 = erlang:monitor(process, NewIndexerPid),
++        ok = couch_server:delete(DbName, [?ADMIN_USER]),
++        receive
++            {'DOWN', MonRef1, _, _, _} ->
++                ok
++        after ?TIMEOUT ->
++            erlang:error(
++                {assertion_failed,
++                 [{module, ?MODULE}, {line, ?LINE},
++                  {reason, "new view group did not die after DB deletion"}]})
++        end
++    end).
++
++couchdb_1283() ->
++    ?_test(begin
++        ok = couch_config:set("couchdb", "max_dbs_open", "3", false),
++        ok = couch_config:set("couchdb", "delayed_commits", "false", false),
++
++        {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_USER]),
++        DDoc = couch_doc:from_json_obj({[
++            {<<"_id">>, <<"_design/foo">>},
++            {<<"language">>, <<"javascript">>},
++            {<<"views">>, {[
++                {<<"foo">>, {[
++                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
++                ]}},
++                {<<"foo2">>, {[
++                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
++                ]}},
++                {<<"foo3">>, {[
++                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
++                ]}},
++                {<<"foo4">>, {[
++                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
++                ]}},
++                {<<"foo5">>, {[
++                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
++                ]}}
++            ]}}
++        ]}),
++        {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
++        ok = populate_db(MDb1, 100, 100),
++        query_view(MDb1#db.name, "foo", "foo"),
++        ok = couch_db:close(MDb1),
++
++        {ok, Db1} = couch_db:create(?tempdb(), [?ADMIN_USER]),
++        ok = couch_db:close(Db1),
++        {ok, Db2} = couch_db:create(?tempdb(), [?ADMIN_USER]),
++        ok = couch_db:close(Db2),
++        {ok, Db3} = couch_db:create(?tempdb(), [?ADMIN_USER]),
++        ok = couch_db:close(Db3),
++
++        Writer1 = spawn_writer(Db1#db.name),
++        Writer2 = spawn_writer(Db2#db.name),
++
++        ?assert(is_process_alive(Writer1)),
++        ?assert(is_process_alive(Writer2)),
++
++        ?assertEqual(ok, get_writer_status(Writer1)),
++        ?assertEqual(ok, get_writer_status(Writer2)),
++
++        {ok, MonRef} = couch_mrview:compact(MDb1#db.name, <<"_design/foo">>,
++                                            [monitor]),
++
++        Writer3 = spawn_writer(Db3#db.name),
++        ?assert(is_process_alive(Writer3)),
++        ?assertEqual({error, all_dbs_active}, get_writer_status(Writer3)),
++
++        ?assert(is_process_alive(Writer1)),
++        ?assert(is_process_alive(Writer2)),
++        ?assert(is_process_alive(Writer3)),
++
++        receive
++            {'DOWN', MonRef, process, _, Reason} ->
++                ?assertEqual(normal, Reason)
++        after ?TIMEOUT ->
++            erlang:error(
++                {assertion_failed,
++                 [{module, ?MODULE}, {line, ?LINE},
++                  {reason, "Failure compacting view group"}]})
++        end,
++
++        ?assertEqual(ok, writer_try_again(Writer3)),
++        ?assertEqual(ok, get_writer_status(Writer3)),
++
++        ?assert(is_process_alive(Writer1)),
++        ?assert(is_process_alive(Writer2)),
++        ?assert(is_process_alive(Writer3)),
++
++        ?assertEqual(ok, stop_writer(Writer1)),
++        ?assertEqual(ok, stop_writer(Writer2)),
++        ?assertEqual(ok, stop_writer(Writer3))
++    end).
++
++create_doc(DbName, DocId) when is_list(DocId) ->
++    create_doc(DbName, ?l2b(DocId));
++create_doc(DbName, DocId) when is_binary(DocId) ->
++    {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
++    Doc666 = couch_doc:from_json_obj({[
++        {<<"_id">>, DocId},
++        {<<"value">>, 999}
++    ]}),
++    {ok, _} = couch_db:update_docs(Db, [Doc666]),
++    couch_db:ensure_full_commit(Db),
++    couch_db:close(Db).
++
++create_docs(DbName) ->
++    {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
++    Doc1 = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"doc1">>},
++        {<<"value">>, 1}
++
++    ]}),
++    Doc2 = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"doc2">>},
++        {<<"value">>, 2}
++
++    ]}),
++    Doc3 = couch_doc:from_json_obj({[
++        {<<"_id">>, <<"doc3">>},
++        {<<"value">>, 3}
++
++    ]}),
++    {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
++    couch_db:ensure_full_commit(Db),
++    couch_db:close(Db).
++
++populate_db(Db, BatchSize, N) when N > 0 ->
++    Docs = lists:map(
++        fun(_) ->
++            couch_doc:from_json_obj({[
++                {<<"_id">>, couch_uuids:new()},
++                {<<"value">>, base64:encode(crypto:rand_bytes(1000))}
++            ]})
++        end,
++        lists:seq(1, BatchSize)),
++    {ok, _} = couch_db:update_docs(Db, Docs, []),
++    populate_db(Db, BatchSize, N - length(Docs));
++populate_db(_Db, _, _) ->
++    ok.
++
++create_design_doc(DbName, DDName, ViewName) ->
++    {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
++    DDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, DDName},
++        {<<"language">>, <<"javascript">>},
++        {<<"views">>, {[
++            {ViewName, {[
++                {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
++            ]}}
++        ]}}
++    ]}),
++    {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
++    couch_db:ensure_full_commit(Db),
++    couch_db:close(Db),
++    Rev.
++
++update_design_doc(DbName, DDName, ViewName) ->
++    {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
++    {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_USER]),
++    {Props} = couch_doc:to_json_obj(Doc, []),
++    Rev = couch_util:get_value(<<"_rev">>, Props),
++    DDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, DDName},
++        {<<"_rev">>, Rev},
++        {<<"language">>, <<"javascript">>},
++        {<<"views">>, {[
++            {ViewName, {[
++                {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
++            ]}}
++        ]}}
++    ]}),
++    {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_USER]),
++    couch_db:ensure_full_commit(Db),
++    couch_db:close(Db),
++    NewRev.
++
++delete_design_doc(DbName, DDName, Rev) ->
++    {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
++    DDoc = couch_doc:from_json_obj({[
++        {<<"_id">>, DDName},
++        {<<"_rev">>, couch_doc:rev_to_str(Rev)},
++        {<<"_deleted">>, true}
++    ]}),
++    {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
++    couch_db:close(Db).
++
++db_url(DbName) ->
++    Addr = couch_config:get("httpd", "bind_address", "127.0.0.1"),
++    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
++    "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
++
++query_view(DbName, DDoc, View) ->
++    query_view(DbName, DDoc, View, false).
++
++query_view(DbName, DDoc, View, Stale) ->
++    {ok, Code, _Headers, Body} = test_request:get(
++        db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View
++        ++ case Stale of
++               false -> [];
++               _ -> "?stale=" ++ atom_to_list(Stale)
++           end),
++    ?assertEqual(200, Code),
++    {Props} = ejson:decode(Body),
++    couch_util:get_value(<<"rows">>, Props, []).
++
++check_rows_value(Rows, Value) ->
++    lists:foreach(
++        fun({Row}) ->
++            ?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
++        end, Rows).
++
++view_cleanup(DbName) ->
++    {ok, Db} = couch_db:open(DbName, [?ADMIN_USER]),
++    couch_mrview:cleanup(Db),
++    couch_db:close(Db).
++
++get_db_ref_counter(DbName) ->
++    {ok, #db{fd_ref_counter = Ref} = Db} = couch_db:open_int(DbName, []),
++    ok = couch_db:close(Db),
++    Ref.
++
++count_db_refs(DbName) ->
++    Ref = get_db_ref_counter(DbName),
++    % have to sleep a bit to let couchdb cleanup all refs and leave only
++    % active ones. otherwise the related tests will randomly fail due to
++    % count number mismatch
++    timer:sleep(200),
++    couch_ref_counter:count(Ref).
++
++count_index_files(DbName) ->
++    % count the on-disk mrview index files for this database's design docs
++    RootDir = couch_config:get("couchdb", "view_index_dir"),
++    length(filelib:wildcard(RootDir ++ "/." ++
++        binary_to_list(DbName) ++ "_design"++"/mrview/*")).
++
++has_doc(DocId1, Rows) ->
++    DocId = iolist_to_binary(DocId1),
++    lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows).
++
++backup_db_file(DbName) ->
++    DbDir = couch_config:get("couchdb", "database_dir"),
++    DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
++    {ok, _} = file:copy(DbFile, DbFile ++ ".backup"),
++    ok.
++
++restore_backup_db_file(DbName) ->
++    DbDir = couch_config:get("couchdb", "database_dir"),
++    stop(whereis(couch_server_sup)),
++    DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
++    ok = file:delete(DbFile),
++    ok = file:rename(DbFile ++ ".backup", DbFile),
++    start(),
++    ok.
++
++compact_db(DbName) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    {ok, _} = couch_db:start_compact(Db),
++    ok = couch_db:close(Db),
++    wait_db_compact_done(DbName, 10).
++
++wait_db_compact_done(_DbName, 0) ->
++    erlang:error({assertion_failed,
++                  [{module, ?MODULE},
++                   {line, ?LINE},
++                   {reason, "DB compaction failed to finish"}]});
++wait_db_compact_done(DbName, N) ->
++    {ok, Db} = couch_db:open_int(DbName, []),
++    ok = couch_db:close(Db),
++    case is_pid(Db#db.compactor_pid) of
++    false ->
++        ok;
++    true ->
++        ok = timer:sleep(?DELAY),
++        wait_db_compact_done(DbName, N - 1)
++    end.
++
++compact_view_group(DbName, DDocId) when is_list(DDocId) ->
++    compact_view_group(DbName, ?l2b("_design/" ++ DDocId));
++compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
++    ok = couch_mrview:compact(DbName, DDocId),
++    wait_view_compact_done(DbName, DDocId, 10).
++
++wait_view_compact_done(_DbName, _DDocId, 0) ->
++    erlang:error({assertion_failed,
++                  [{module, ?MODULE},
++                   {line, ?LINE},
++                   {reason, "view compaction failed to finish"}]});
++wait_view_compact_done(DbName, DDocId, N) ->
++    {ok, Code, _Headers, Body} = test_request:get(
++        db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
++    ?assertEqual(200, Code),
++    {Info} = ejson:decode(Body),
++    {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
++    CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
++    case CompactRunning of
++        false ->
++            ok;
++        true ->
++            ok = timer:sleep(?DELAY),
++            wait_view_compact_done(DbName, DDocId, N - 1)
++    end.
++
++spawn_writer(DbName) ->
++    Parent = self(),
++    spawn(fun() ->
++        process_flag(priority, high),
++        writer_loop(DbName, Parent)
++    end).
++
++get_writer_status(Writer) ->
++    Ref = make_ref(),
++    Writer ! {get_status, Ref},
++    receive
++        {db_open, Ref} ->
++            ok;
++        {db_open_error, Error, Ref} ->
++            Error
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++writer_try_again(Writer) ->
++    Ref = make_ref(),
++    Writer ! {try_again, Ref},
++    receive
++        {ok, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        timeout
++    end.
++
++stop_writer(Writer) ->
++    Ref = make_ref(),
++    Writer ! {stop, Ref},
++    receive
++        {ok, Ref} ->
++            ok
++    after ?TIMEOUT ->
++        erlang:error({assertion_failed,
++                      [{module, ?MODULE},
++                       {line, ?LINE},
++                       {reason, "Timeout on stopping process"}]})
++    end.
++
++writer_loop(DbName, Parent) ->
++    case couch_db:open_int(DbName, []) of
++        {ok, Db} ->
++            writer_loop_1(Db, Parent);
++        Error ->
++            writer_loop_2(DbName, Parent, Error)
++    end.
++
++writer_loop_1(Db, Parent) ->
++    receive
++        {get_status, Ref} ->
++            Parent ! {db_open, Ref},
++            writer_loop_1(Db, Parent);
++        {stop, Ref} ->
++            ok = couch_db:close(Db),
++            Parent ! {ok, Ref}
++    end.
++
++writer_loop_2(DbName, Parent, Error) ->
++    receive
++        {get_status, Ref} ->
++            Parent ! {db_open_error, Error, Ref},
++            writer_loop_2(DbName, Parent, Error);
++        {try_again, Ref} ->
++            Parent ! {ok, Ref},
++            writer_loop(DbName, Parent)
++    end.
++
++read_header(File) ->
++    {ok, Fd} = couch_file:open(File),
++    {ok, {_Sig, Header}} = couch_file:read_header(Fd),
++    couch_file:close(Fd),
++    Header.
+diff --git a/test/couchdb/eunit.ini b/test/couchdb/eunit.ini
+new file mode 100644
+index 0000000..50024a3
+--- /dev/null
++++ b/test/couchdb/eunit.ini
+@@ -0,0 +1,28 @@
++; Licensed to the Apache Software Foundation (ASF) under one
++; or more contributor license agreements.  See the NOTICE file
++; distributed with this work for additional information
++; regarding copyright ownership.  The ASF licenses this file
++; to you under the Apache License, Version 2.0 (the
++; "License"); you may not use this file except in compliance
++; with the License.  You may obtain a copy of the License at
++;
++;   http://www.apache.org/licenses/LICENSE-2.0
++;
++; Unless required by applicable law or agreed to in writing,
++; software distributed under the License is distributed on an
++; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++; KIND, either express or implied.  See the License for the
++; specific language governing permissions and limitations
++; under the License.
++
++[couchdb]
++; time to relax!
++uuid = 74696d6520746f2072656c617821
++
++[httpd]
++port = 0
++
++[log]
++; logging is disabled to remove unwanted noise in stdout from tests processing
++level = none
++include_sasl = false
+diff --git a/test/couchdb/fixtures/Makefile.am b/test/couchdb/fixtures/Makefile.am
+new file mode 100644
+index 0000000..fa38d3f
+--- /dev/null
++++ b/test/couchdb/fixtures/Makefile.am
+@@ -0,0 +1,20 @@
++## Licensed under the Apache License, Version 2.0 (the "License"); you may not
++## use this file except in compliance with the License. You may obtain a copy of
++## the License at
++##
++##   http://www.apache.org/licenses/LICENSE-2.0
++##
++## Unless required by applicable law or agreed to in writing, software
++## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++## License for the specific language governing permissions and limitations under
++## the License.
++
++noinst_PROGRAMS = test_cfg_register
++test_cfg_register_SOURCES = test_cfg_register.c
++test_cfg_register_CFLAGS = -D_BSD_SOURCE
++
++noinst_SCRIPTS = os_daemon_configer.escript
++
++all:
++	chmod +x os_daemon_configer.escript
+diff --git a/test/couchdb/fixtures/couch_config_tests_1.ini b/test/couchdb/fixtures/couch_config_tests_1.ini
+new file mode 100644
+index 0000000..55451da
+--- /dev/null
++++ b/test/couchdb/fixtures/couch_config_tests_1.ini
+@@ -0,0 +1,22 @@
++; Licensed to the Apache Software Foundation (ASF) under one
++; or more contributor license agreements.  See the NOTICE file
++; distributed with this work for additional information
++; regarding copyright ownership.  The ASF licenses this file
++; to you under the Apache License, Version 2.0 (the
++; "License"); you may not use this file except in compliance
++; with the License.  You may obtain a copy of the License at
++; 
++;   http://www.apache.org/licenses/LICENSE-2.0
++;
++; Unless required by applicable law or agreed to in writing,
++; software distributed under the License is distributed on an
++; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++; KIND, either express or implied.  See the License for the
++; specific language governing permissions and limitations
++; under the License.
++
++[couchdb]
++max_dbs_open=10
++
++[httpd]
++port=4895
+diff --git a/test/couchdb/fixtures/couch_config_tests_2.ini b/test/couchdb/fixtures/couch_config_tests_2.ini
+new file mode 100644
+index 0000000..5f46357
+--- /dev/null
++++ b/test/couchdb/fixtures/couch_config_tests_2.ini
+@@ -0,0 +1,22 @@
++; Licensed to the Apache Software Foundation (ASF) under one
++; or more contributor license agreements.  See the NOTICE file
++; distributed with this work for additional information
++; regarding copyright ownership.  The ASF licenses this file
++; to you under the Apache License, Version 2.0 (the
++; "License"); you may not use this file except in compliance
++; with the License.  You may obtain a copy of the License at
++; 
++;   http://www.apache.org/licenses/LICENSE-2.0
++;
++; Unless required by applicable law or agreed to in writing,
++; software distributed under the License is distributed on an
++; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++; KIND, either express or implied.  See the License for the
++; specific language governing permissions and limitations
++; under the License.
++
++[httpd]
++port = 80
++
++[fizbang]
++unicode = normalized
+diff --git a/test/couchdb/fixtures/couch_stats_aggregates.cfg b/test/couchdb/fixtures/couch_stats_aggregates.cfg
+new file mode 100644
+index 0000000..30e475d
+--- /dev/null
++++ b/test/couchdb/fixtures/couch_stats_aggregates.cfg
+@@ -0,0 +1,19 @@
++% Licensed to the Apache Software Foundation (ASF) under one
++% or more contributor license agreements.  See the NOTICE file
++% distributed with this work for additional information
++% regarding copyright ownership.  The ASF licenses this file
++% to you under the Apache License, Version 2.0 (the
++% "License"); you may not use this file except in compliance
++% with the License.  You may obtain a copy of the License at
++% 
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing,
++% software distributed under the License is distributed on an
++% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++% KIND, either express or implied.  See the License for the
++% specific language governing permissions and limitations
++% under the License.
++
++{testing, stuff, "yay description"}.
++{number, '11', "randomosity"}.
+diff --git a/test/couchdb/fixtures/couch_stats_aggregates.ini b/test/couchdb/fixtures/couch_stats_aggregates.ini
+new file mode 100644
+index 0000000..cc5cd21
+--- /dev/null
++++ b/test/couchdb/fixtures/couch_stats_aggregates.ini
+@@ -0,0 +1,20 @@
++; Licensed to the Apache Software Foundation (ASF) under one
++; or more contributor license agreements.  See the NOTICE file
++; distributed with this work for additional information
++; regarding copyright ownership.  The ASF licenses this file
++; to you under the Apache License, Version 2.0 (the
++; "License"); you may not use this file except in compliance
++; with the License.  You may obtain a copy of the License at
++; 
++;   http://www.apache.org/licenses/LICENSE-2.0
++;
++; Unless required by applicable law or agreed to in writing,
++; software distributed under the License is distributed on an
++; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++; KIND, either express or implied.  See the License for the
++; specific language governing permissions and limitations
++; under the License.
++
++[stats]
++rate = 10000000 ; We call collect_sample in testing
++samples = [0, 1]
+diff --git a/test/couchdb/fixtures/os_daemon_bad_perm.sh b/test/couchdb/fixtures/os_daemon_bad_perm.sh
+new file mode 100644
+index 0000000..345c8b4
+--- /dev/null
++++ b/test/couchdb/fixtures/os_daemon_bad_perm.sh
+@@ -0,0 +1,17 @@
++#!/bin/sh -e
++#
++# Licensed under the Apache License, Version 2.0 (the "License"); you may not
++# use this file except in compliance with the License. You may obtain a copy of
++# the License at
++# 
++#   http://www.apache.org/licenses/LICENSE-2.0
++# 
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations under
++# the License.
++# 
++# Please do not make this file executable as that's the error being tested.
++
++sleep 5
+diff --git a/test/couchdb/fixtures/os_daemon_can_reboot.sh b/test/couchdb/fixtures/os_daemon_can_reboot.sh
+new file mode 100755
+index 0000000..5bc10e8
+--- /dev/null
++++ b/test/couchdb/fixtures/os_daemon_can_reboot.sh
+@@ -0,0 +1,15 @@
++#!/bin/sh -e
++#
++# Licensed under the Apache License, Version 2.0 (the "License"); you may not
++# use this file except in compliance with the License. You may obtain a copy of
++# the License at
++# 
++#   http://www.apache.org/licenses/LICENSE-2.0
++# 
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations under
++# the License.
++
++sleep 2
+diff --git a/test/couchdb/fixtures/os_daemon_configer.escript.in b/test/couchdb/fixtures/os_daemon_configer.escript.in
+new file mode 100755
+index 0000000..d2ecfa8
+--- /dev/null
++++ b/test/couchdb/fixtures/os_daemon_configer.escript.in
+@@ -0,0 +1,84 @@
++#! /usr/bin/env escript
++%% -*- erlang -*-
++%%! -DTEST -pa @abs_top_builddir@/src/ejson
++%%
++%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++%% use this file except in compliance with the License. You may obtain a copy of
++%% the License at
++%%
++%%   http://www.apache.org/licenses/LICENSE-2.0
++%%
++%% Unless required by applicable law or agreed to in writing, software
++%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++%% License for the specific language governing permissions and limitations under
++%% the License.
++
++read() ->
++    case io:get_line('') of
++        eof ->
++            stop;
++        Data ->
++            ejson:decode(Data)
++    end.
++
++write(Mesg) ->
++    Data = iolist_to_binary(ejson:encode(Mesg)),
++    io:format(binary_to_list(Data) ++ "\n", []).
++
++get_cfg(Section) ->
++    write([<<"get">>, Section]),
++    read().
++
++get_cfg(Section, Name) ->
++    write([<<"get">>, Section, Name]),
++    read().
++
++log(Mesg) ->
++    write([<<"log">>, Mesg]).
++
++log(Mesg, Level) ->
++    write([<<"log">>, Mesg, {[{<<"level">>, Level}]}]).
++
++test_get_cfg1() ->
++    Path = list_to_binary(?FILE),
++    FileName = list_to_binary(filename:basename(?FILE)),
++    {[{FileName, Path}]} = get_cfg(<<"os_daemons">>).
++
++test_get_cfg2() ->
++    Path = list_to_binary(?FILE),
++    FileName = list_to_binary(filename:basename(?FILE)),
++    Path = get_cfg(<<"os_daemons">>, FileName),
++    <<"sequential">> = get_cfg(<<"uuids">>, <<"algorithm">>).
++
++
++test_get_unknown_cfg() ->
++    {[]} = get_cfg(<<"aal;3p4">>),
++    null = get_cfg(<<"aal;3p4">>, <<"313234kjhsdfl">>).
++
++test_log() ->
++    log(<<"foobar!">>),
++    log(<<"some stuff!">>, <<"debug">>),
++    log(2),
++    log(true),
++    write([<<"log">>, <<"stuff">>, 2]),
++    write([<<"log">>, 3, null]),
++    write([<<"log">>, [1, 2], {[{<<"level">>, <<"debug">>}]}]),
++    write([<<"log">>, <<"true">>, {[]}]).
++
++do_tests() ->
++    test_get_cfg1(),
++    test_get_cfg2(),
++    test_get_unknown_cfg(),
++    test_log(),
++    loop(io:read("")).
++
++loop({ok, _}) ->
++    loop(io:read(""));
++loop(eof) ->
++    init:stop();
++loop({error, _Reason}) ->
++    init:stop().
++
++main([]) ->
++    do_tests().
+diff --git a/test/couchdb/fixtures/os_daemon_die_on_boot.sh b/test/couchdb/fixtures/os_daemon_die_on_boot.sh
+new file mode 100755
+index 0000000..256ee79
+--- /dev/null
++++ b/test/couchdb/fixtures/os_daemon_die_on_boot.sh
+@@ -0,0 +1,15 @@
++#!/bin/sh -e
++#
++# Licensed under the Apache License, Version 2.0 (the "License"); you may not
++# use this file except in compliance with the License. You may obtain a copy of
++# the License at
++# 
++#   http://www.apache.org/licenses/LICENSE-2.0
++# 
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations under
++# the License.
++
++exit 1
+diff --git a/test/couchdb/fixtures/os_daemon_die_quickly.sh b/test/couchdb/fixtures/os_daemon_die_quickly.sh
+new file mode 100755
+index 0000000..f5a1368
+--- /dev/null
++++ b/test/couchdb/fixtures/os_daemon_die_quickly.sh
+@@ -0,0 +1,15 @@
++#!/bin/sh -e
++#
++# Licensed under the Apache License, Version 2.0 (the "License"); you may not
++# use this file except in compliance with the License. You may obtain a copy of
++# the License at
++# 
++#   http://www.apache.org/licenses/LICENSE-2.0
++# 
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations under
++# the License.
++
++sleep 1
+diff --git a/test/couchdb/fixtures/os_daemon_looper.escript b/test/couchdb/fixtures/os_daemon_looper.escript
+new file mode 100755
+index 0000000..73974e9
+--- /dev/null
++++ b/test/couchdb/fixtures/os_daemon_looper.escript
+@@ -0,0 +1,26 @@
++#! /usr/bin/env escript
++
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++loop() ->
++    loop(io:read("")).
++
++loop({ok, _}) ->
++    loop(io:read(""));
++loop(eof) ->
++    stop;
++loop({error, Reason}) ->
++    throw({error, Reason}).
++
++main([]) ->
++    loop().
+diff --git a/test/couchdb/fixtures/test_cfg_register.c b/test/couchdb/fixtures/test_cfg_register.c
+new file mode 100644
+index 0000000..c910bac
+--- /dev/null
++++ b/test/couchdb/fixtures/test_cfg_register.c
+@@ -0,0 +1,31 @@
++// Licensed under the Apache License, Version 2.0 (the "License"); you may not
++// use this file except in compliance with the License. You may obtain a copy of
++// the License at
++//
++//   http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++// License for the specific language governing permissions and limitations under
++// the License.
++
++#include <stdio.h>
++#include <stdlib.h>
++
++int
++main(int argc, const char * argv[])
++{
++    char c = '\0';
++    size_t num = 1;
++    
++    fprintf(stdout, "[\"register\", \"s1\"]\n");
++    fprintf(stdout, "[\"register\", \"s2\", \"k\"]\n");
++    fflush(stdout);
++    
++    while(c != '\n' && num > 0) {
++        num = fread(&c, 1, 1, stdin);
++    }
++    
++    exit(0);
++}
+diff --git a/test/couchdb/include/couch_eunit.hrl.in b/test/couchdb/include/couch_eunit.hrl.in
+new file mode 100644
+index 0000000..ff080e1
+--- /dev/null
++++ b/test/couchdb/include/couch_eunit.hrl.in
+@@ -0,0 +1,44 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-include_lib("eunit/include/eunit.hrl").
++
++-define(BUILDDIR, "@abs_top_builddir@").
++-define(SOURCEDIR, "@abs_top_srcdir@").
++-define(CONFIG_CHAIN, [
++    filename:join([?BUILDDIR, "etc", "couchdb", "default_dev.ini"]),
++    filename:join([?BUILDDIR, "etc", "couchdb", "local_dev.ini"]),
++    filename:join([?SOURCEDIR, "test", "couchdb", "eunit.ini"])]).
++-define(FIXTURESDIR,
++    filename:join([?SOURCEDIR, "test", "couchdb", "fixtures"])).
++-define(TEMPDIR,
++    filename:join([?BUILDDIR, "test", "couchdb", "temp"])).
++
++-define(tempfile,
++    fun() ->
++        {A, B, C} = erlang:now(),
++        N = node(),
++        FileName = lists:flatten(io_lib:format("~p-~p.~p.~p", [N, A, B, C])),
++        filename:join([?TEMPDIR, FileName])
++    end).
++-define(tempdb,
++    fun() ->
++            Nums = tuple_to_list(erlang:now()),
++            Prefix = "eunit-test-db",
++            Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
++            list_to_binary(Prefix ++ "-" ++ Suffix)
++    end).
++-define(docid,
++    fun() ->
++        {A, B, C} = erlang:now(),
++        lists:flatten(io_lib:format("~p~p~p", [A, B, C]))
++    end).
+diff --git a/test/couchdb/json_stream_parse_tests.erl b/test/couchdb/json_stream_parse_tests.erl
+new file mode 100644
+index 0000000..92303b6
+--- /dev/null
++++ b/test/couchdb/json_stream_parse_tests.erl
+@@ -0,0 +1,151 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(json_stream_parse_tests).
++
++-include("couch_eunit.hrl").
++
++-define(CASES,
++    [
++        {1, "1", "integer numeric literal"},
++        {3.1416, "3.14160", "float numeric literal"},  % text representation may truncate, trail zeroes
++        {-1, "-1", "negative integer numeric literal"},
++        {-3.1416, "-3.14160", "negative float numeric literal"},
++        {12.0e10, "1.20000e+11", "float literal in scientific notation"},
++        {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
++        {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
++        {10.0, "1.0e+01", "yet another float literal in scientific notation"},
++        {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
++        {10.0, "1e1", "yet another float literal in scientific notation"},
++        {<<"foo">>, "\"foo\"", "string literal"},
++        {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
++        {<<"">>, "\"\"", "empty string literal"},
++        {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"},
++        {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"",
++            "only white spaces string literal"},
++        {null, "null", "null literal"},
++        {true, "true", "true literal"},
++        {false, "false", "false literal"},
++        {<<"null">>, "\"null\"", "null string literal"},
++        {<<"true">>, "\"true\"", "true string literal"},
++        {<<"false">>, "\"false\"", "false string literal"},
++        {{[]}, "{}", "empty object literal"},
++        {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}",
++            "simple object literal"},
++        {{[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
++            "{\"foo\":\"bar\",\"baz\":123}", "another simple object literal"},
++        {[], "[]", "empty array literal"},
++        {[[]], "[[]]", "empty array literal inside a single element array literal"},
++        {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
++        {[1199344435545.0, 1], "[1199344435545.0,1]",
++             "another simple non-empty array literal"},
++        {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
++        {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}",
++             "object literal with an array valued property"},
++        {{[{<<"foo">>, {[{<<"bar">>, true}]}}]},
++            "{\"foo\":{\"bar\":true}}", "nested object literal"},
++        {{[{<<"foo">>, []}, {<<"bar">>, {[{<<"baz">>, true}]}},
++                {<<"alice">>, <<"bob">>}]},
++            "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
++            "complex object literal"},
++        {[-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
++            "[-123,\"foo\",{\"bar\":[]},null]",
++            "complex array literal"}
++    ]
++).
++
++
++raw_json_input_test_() ->
++    Tests = lists:map(
++        fun({EJson, JsonString, Desc}) ->
++            {Desc,
++             ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
++        end, ?CASES),
++    {"Tests with raw JSON string as the input", Tests}.
++
++one_byte_data_fun_test_() ->
++    Tests = lists:map(
++        fun({EJson, JsonString, Desc}) ->
++            DataFun = fun() -> single_byte_data_fun(JsonString) end,
++            {Desc,
++             ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
++        end, ?CASES),
++    {"Tests with a 1 byte output data function as the input", Tests}.
++
++test_multiple_bytes_data_fun_test_() ->
++    Tests = lists:map(
++        fun({EJson, JsonString, Desc}) ->
++            DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
++            {Desc,
++             ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
++        end, ?CASES),
++    {"Tests with a multiple bytes output data function as the input", Tests}.
++
++
++%% Test for equivalence of Erlang terms.
++%% Due to arbitrary order of construction, equivalent objects might
++%% compare unequal as erlang terms, so we need to carefully recurse
++%% through aggregates (tuples and objects).
++equiv({Props1}, {Props2}) ->
++    equiv_object(Props1, Props2);
++equiv(L1, L2) when is_list(L1), is_list(L2) ->
++    equiv_list(L1, L2);
++equiv(N1, N2) when is_number(N1), is_number(N2) ->
++    N1 == N2;
++equiv(B1, B2) when is_binary(B1), is_binary(B2) ->
++    B1 == B2;
++equiv(true, true) ->
++    true;
++equiv(false, false) ->
++    true;
++equiv(null, null) ->
++    true.
++
++%% Object representation and traversal order is unknown.
++%% Use the sledgehammer and sort property lists.
++equiv_object(Props1, Props2) ->
++    L1 = lists:keysort(1, Props1),
++    L2 = lists:keysort(1, Props2),
++    Pairs = lists:zip(L1, L2),
++    true = lists:all(
++        fun({{K1, V1}, {K2, V2}}) ->
++            equiv(K1, K2) andalso equiv(V1, V2)
++        end,
++        Pairs).
++
++%% Recursively compare tuple elements for equivalence.
++equiv_list([], []) ->
++    true;
++equiv_list([V1 | L1], [V2 | L2]) ->
++    equiv(V1, V2) andalso equiv_list(L1, L2).
++
++single_byte_data_fun([]) ->
++    done;
++single_byte_data_fun([H | T]) ->
++    {<<H>>, fun() -> single_byte_data_fun(T) end}.
++
++multiple_bytes_data_fun([]) ->
++    done;
++multiple_bytes_data_fun(L) ->
++    N = crypto:rand_uniform(0, 7),
++    {Part, Rest} = split(L, N),
++    {list_to_binary(Part), fun() -> multiple_bytes_data_fun(Rest) end}.
++
++split(L, N) when length(L) =< N ->
++    {L, []};
++split(L, N) ->
++    take(N, L, []).
++
++take(0, L, Acc) ->
++    {lists:reverse(Acc), L};
++take(N, [H|L], Acc) ->
++    take(N - 1, L, [H | Acc]).
+diff --git a/test/couchdb/run.in b/test/couchdb/run.in
+new file mode 100644
+index 0000000..2405f63
+--- /dev/null
++++ b/test/couchdb/run.in
+@@ -0,0 +1,111 @@
++#!/usr/bin/env escript
++%% -*- erlang -*-
++%%! -DTEST -env ERL_LIBS @abs_top_builddir@/src:$ERL_LIBS -pa @abs_top_builddir@/test/couchdb/ebin
++%%
++%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++%% use this file except in compliance with the License. You may obtain a copy of
++%% the License at
++%%
++%%   http://www.apache.org/licenses/LICENSE-2.0
++%%
++%% Unless required by applicable law or agreed to in writing, software
++%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++%% License for the specific language governing permissions and limitations under
++%% the License.
++
++-define(BUILDDIR, "@abs_top_builddir@").
++-define(SOURCEDIR, "@abs_top_srcdir@").
++-define(TESTS_EBIN, filename:join([?BUILDDIR, "test", "couchdb", "ebin", ""])).
++-define(TESTS_TEMP, filename:join([?BUILDDIR, "test", "couchdb", "temp", ""])).
++
++main([]) ->
++    io:fwrite("Path to test file or directory wasn't specified.~n"),
++    erlang:halt(1);
++main(["-v"]) ->
++    io:fwrite("Path to test file or directory wasn't specified.~n"),
++    erlang:halt(1);
++main(["-v", Path]) ->
++    run(Path, [verbose]);
++main(["-v", _ | _]) ->
++    io:fwrite("Only single tests source path is supported.~n"),
++    erlang:halt(1);
++main([Path]) ->
++    run(Path, []),
++    ok;
++main([_|_]) ->
++    io:fwrite("Only single tests source path is supported.~n"),
++    erlang:halt(1).
++
++
++run(Path, Options) ->
++    ensure_dirs(),
++    Mask = "*_tests.erl",
++    Files = list_files(Path, Mask),
++    init_code_path(),
++    Mods = compile(Files),
++    run_tests(Mods, Options).
++
++ensure_dirs() ->
++    ok = filelib:ensure_dir(?TESTS_EBIN),
++    ok = filelib:ensure_dir(?TESTS_TEMP),
++    ok.
++
++list_files(Path, Mask)->
++    AbsPath = filename:absname(Path),
++    case filelib:is_file(AbsPath) of
++        true ->
++            ok;
++        false ->
++            io:fwrite("File or directory not found: ~p~n", [AbsPath]),
++            erlang:halt(1)
++    end,
++    case filelib:is_dir(AbsPath) of
++        true ->
++            case filelib:wildcard(filename:join([AbsPath, Mask])) of
++                [] ->
++                    io:fwrite("No test files were found at ~p by mask ~p ~n",
++                              [AbsPath, Mask]),
++                    erlang:halt(1);
++                Files ->
++                    Files
++            end;
++        false -> [AbsPath]
++    end.
++
++
++compile(Files) ->
++    lists:map(
++        fun(File)->
++            io:fwrite("compile ~p~n", [File]),
++            Opts = [report, verbose, {outdir, ?TESTS_EBIN},
++                    {i, filename:join([?BUILDDIR, "test", "couchdb",
++                                       "include"])},
++                    {i, filename:join([?SOURCEDIR, "src"])}],
++            {ok, Mod} = compile:file(File, Opts),
++            Mod
++        end,
++    Files).
++
++
++run_tests(Mods, Options) ->
++    %% disable error_logger to reduce noise in stdout
++    error_logger:tty(false),
++    case eunit:test(Mods, Options) of
++        error -> erlang:halt(1);
++        _ -> ok
++    end.
++
++
++init_code_path() ->
++    Paths = [
++        "couchdb",
++        "ejson",
++        "erlang-oauth",
++        "ibrowse",
++        "mochiweb",
++        "snappy"
++    ],
++    lists:foreach(fun(Name) ->
++        code:add_patha(filename:join([?BUILDDIR, "src", Name]))
++    end, Paths).
+diff --git a/test/couchdb/test_request.erl b/test/couchdb/test_request.erl
+new file mode 100644
+index 0000000..68e4956
+--- /dev/null
++++ b/test/couchdb/test_request.erl
+@@ -0,0 +1,75 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(test_request).
++
++-export([get/1, get/2, get/3]).
++-export([put/2, put/3]).
++-export([options/1, options/2, options/3]).
++-export([request/3, request/4]).
++
++get(Url) ->
++    request(get, Url, []).
++
++get(Url, Headers) ->
++    request(get, Url, Headers).
++get(Url, Headers, Opts) ->
++    request(get, Url, Headers, [], Opts).
++
++
++put(Url, Body) ->
++    request(put, Url, [], Body).
++
++put(Url, Headers, Body) ->
++    request(put, Url, Headers, Body).
++
++
++options(Url) ->
++    request(options, Url, []).
++
++options(Url, Headers) ->
++    request(options, Url, Headers).
++
++options(Url, Headers, Opts) ->
++    request(options, Url, Headers, [], Opts).
++
++
++request(Method, Url, Headers) ->
++    request(Method, Url, Headers, []).
++
++request(Method, Url, Headers, Body) ->
++    request(Method, Url, Headers, Body, [], 3).
++
++request(Method, Url, Headers, Body, Opts) ->
++    request(Method, Url, Headers, Body, Opts, 3).
++
++request(_Method, _Url, _Headers, _Body, _Opts, 0) ->
++    {error, request_failed};
++request(Method, Url, Headers, Body, Opts, N) ->
++    case code:is_loaded(ibrowse) of
++        false ->
++            {ok, _} = ibrowse:start();
++        _ ->
++            ok
++    end,
++    case ibrowse:send_req(Url, Headers, Method, Body, Opts) of
++        {ok, Code0, RespHeaders, RespBody0} ->
++            Code = list_to_integer(Code0),
++            RespBody = iolist_to_binary(RespBody0),
++            {ok, Code, RespHeaders, RespBody};
++        {error, {'EXIT', {normal, _}}} ->
++            % Connection closed right after a successful request that
++            % used the same connection.
++            request(Method, Url, Headers, Body, N - 1);
++        Error ->
++            Error
++    end.
+diff --git a/test/couchdb/test_web.erl b/test/couchdb/test_web.erl
+new file mode 100644
+index 0000000..1de2cd1
+--- /dev/null
++++ b/test/couchdb/test_web.erl
+@@ -0,0 +1,112 @@
++% Licensed under the Apache License, Version 2.0 (the "License"); you may not
++% use this file except in compliance with the License. You may obtain a copy of
++% the License at
++%
++%   http://www.apache.org/licenses/LICENSE-2.0
++%
++% Unless required by applicable law or agreed to in writing, software
++% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++% License for the specific language governing permissions and limitations under
++% the License.
++
++-module(test_web).
++-behaviour(gen_server).
++
++-include("couch_eunit.hrl").
++
++-export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]).
++-export([init/1, terminate/2, code_change/3]).
++-export([handle_call/3, handle_cast/2, handle_info/2]).
++
++-define(SERVER, test_web_server).
++-define(HANDLER, test_web_handler).
++-define(DELAY, 500).
++
++start_link() ->
++    gen_server:start({local, ?HANDLER}, ?MODULE, [], []),
++    mochiweb_http:start([
++        {name, ?SERVER},
++        {loop, {?MODULE, loop}},
++        {port, 0}
++    ]).
++
++loop(Req) ->
++    %?debugFmt("Handling request: ~p", [Req]),
++    case gen_server:call(?HANDLER, {check_request, Req}) of
++        {ok, RespInfo} ->
++            {ok, Req:respond(RespInfo)};
++        {raw, {Status, Headers, BodyChunks}} ->
++            Resp = Req:start_response({Status, Headers}),
++            lists:foreach(fun(C) -> Resp:send(C) end, BodyChunks),
++            erlang:put(mochiweb_request_force_close, true),
++            {ok, Resp};
++        {chunked, {Status, Headers, BodyChunks}} ->
++            Resp = Req:respond({Status, Headers, chunked}),
++            timer:sleep(?DELAY),
++            lists:foreach(fun(C) -> Resp:write_chunk(C) end, BodyChunks),
++            Resp:write_chunk([]),
++            {ok, Resp};
++        {error, Reason} ->
++            ?debugFmt("Error: ~p", [Reason]),
++            Body = lists:flatten(io_lib:format("Error: ~p", [Reason])),
++            {ok, Req:respond({200, [], Body})}
++    end.
++
++get_port() ->
++    mochiweb_socket_server:get(?SERVER, port).
++
++set_assert(Fun) ->
++    ?assertEqual(ok, gen_server:call(?HANDLER, {set_assert, Fun})).
++
++check_last() ->
++    gen_server:call(?HANDLER, last_status).
++
++init(_) ->
++    {ok, nil}.
++
++terminate(_Reason, _State) ->
++    ok.
++
++stop() ->
++    gen_server:cast(?SERVER, stop).
++
++
++handle_call({check_request, Req}, _From, State) when is_function(State, 1) ->
++    Resp2 = case (catch State(Req)) of
++        {ok, Resp} ->
++            {reply, {ok, Resp}, was_ok};
++        {raw, Resp} ->
++            {reply, {raw, Resp}, was_ok};
++        {chunked, Resp} ->
++            {reply, {chunked, Resp}, was_ok};
++        Error ->
++            {reply, {error, Error}, not_ok}
++    end,
++    Req:cleanup(),
++    Resp2;
++handle_call({check_request, _Req}, _From, _State) ->
++    {reply, {error, no_assert_function}, not_ok};
++handle_call(last_status, _From, State) when is_atom(State) ->
++    {reply, State, nil};
++handle_call(last_status, _From, State) ->
++    {reply, {error, not_checked}, State};
++handle_call({set_assert, Fun}, _From, nil) ->
++    {reply, ok, Fun};
++handle_call({set_assert, _}, _From, State) ->
++    {reply, {error, assert_function_set}, State};
++handle_call(Msg, _From, State) ->
++    {reply, {ignored, Msg}, State}.
++
++handle_cast(stop, State) ->
++    {stop, normal, State};
++handle_cast(Msg, State) ->
++    ?debugFmt("Ignoring cast message: ~p", [Msg]),
++    {noreply, State}.
++
++handle_info(Msg, State) ->
++    ?debugFmt("Ignoring info message: ~p", [Msg]),
++    {noreply, State}.
++
++code_change(_OldVsn, State, _Extra) ->
++    {ok, State}.
+diff --git a/test/etap/run.tpl b/test/etap/run.tpl
+deleted file mode 100644
+index d6d6dbe..0000000
+--- a/test/etap/run.tpl
++++ /dev/null
+@@ -1,32 +0,0 @@
+-#!/bin/sh -e
+-
+-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+-# use this file except in compliance with the License. You may obtain a copy of
+-# the License at
+-#
+-#   http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+-# License for the specific language governing permissions and limitations under
+-# the License.
+-
+-SRCDIR="%abs_top_srcdir%"
+-BUILDDIR="%abs_top_builddir%"
+-export ERL_LIBS="$BUILDDIR/src/:$ERL_LIBS"
+-export ERL_FLAGS="$ERL_FLAGS -pa $BUILDDIR/test/etap/"
+-
+-if test $# -eq 1; then
+-    OPTS=""
+-    TGT=$1
+-else
+-    OPTS=$1
+-    TGT=$2
+-fi
+-
+-if test -f $TGT; then
+-    prove $OPTS $TGT
+-else
+-    prove $OPTS $TGT/*.t
+-fi
diff --git a/couchdb.spec b/couchdb.spec
index d553210..d65a324 100644
--- a/couchdb.spec
+++ b/couchdb.spec
@@ -7,7 +7,7 @@
 
 Name:           couchdb
 Version:        1.6.0
-Release:        12%{?dist}
+Release:        13%{?dist}
 Summary:        A document database server, accessible via a RESTful JSON API
 
 Group:          Applications/Databases
@@ -19,6 +19,8 @@ Source2:        %{name}.init
 Source3:        %{name}.service
 Source4:        %{name}.tmpfiles.conf
 Source5:        %{name}.temporary.sh
+# FIXME remove as soon as eunit tests will be merged upstream
+Source6:        %{name}-tests-blobs.tar
 Patch1:         couchdb-0001-Do-not-gzip-doc-files-and-do-not-install-installatio.patch
 Patch2:         couchdb-0002-More-directories-to-search-for-place-for-init-script.patch
 Patch3:         couchdb-0003-Install-into-erllibdir-by-default.patch
@@ -32,6 +34,7 @@ Patch10:        couchdb-0010-Use-_DEFAULT_SOURCE-instead-of-obsolete-_BSD_SOURCE
 Patch11:        couchdb-0011-Silence-redundant-logging-to-stdout-stderr.patch
 Patch12:        couchdb-0012-Expand-.d-directories-in-erlang.patch
 Patch13:        couchdb-0013-Add-systemd-notification-support.patch
+Patch14:	couchdb-0014-Add-run-script-to-execute-eunit-tests.patch
 
 BuildRequires:  autoconf
 BuildRequires:    autoconf-archive
@@ -39,9 +42,7 @@ BuildRequires:  automake
 BuildRequires:  libtool
 BuildRequires:    curl-devel >= 7.18.0
 BuildRequires:    erlang-erts >= R13B
-# FIXME - this time CouchDB bundled a copy of etap which is heavily different
-# from the one we're shipping
-#BuildRequires:    erlang-etap
+BuildRequires:    erlang-eunit >= R15B
 BuildRequires:    erlang-ibrowse >= 4.0.1
 BuildRequires:    erlang-mochiweb
 BuildRequires:    erlang-oauth >= 1.3.0
@@ -50,8 +51,6 @@ BuildRequires:    erlang-snappy
 BuildRequires:    help2man
 BuildRequires:    js-devel
 BuildRequires:    libicu-devel
-# For /usr/bin/prove
-BuildRequires:    perl(Test::Harness)
 
 Requires:    erlang-crypto%{?_isa}
 # Error:erlang(erlang:max/2) in R12B and earlier
@@ -116,20 +115,24 @@ JavaScript acting as the default view definition language.
 %patch11 -p1 -b .redundant_logging
 %patch12 -p1 -b .expands_d
 %patch13 -p1 -b .sd_notify
+%patch14 -p1 -b .no_etap
+tar xvf %{SOURCE6}
 
 #gzip -d -k ./share/doc/build/latex/CouchDB.pdf.gz
 
 # Remove bundled libraries
 rm -rf src/erlang-oauth
-# FIXME - this time CouchDB bundled a copy of etap which is heavily different
-# from the one we're shipping
-#rm -rf src/etap
 rm -rf src/ibrowse
 rm -rf src/mochiweb
 rm -rf src/snappy
+rm -rf src/etap
+rm -rf test/etap
 
-# More verbose tests
-sed -i -e "s,prove,prove -v,g" test/etap/run.tpl
+# FIXME remove as soon as eunit tests will be merged upstream
+chmod +x test/couchdb/fixtures/os_daemon_looper.escript
+chmod +x test/couchdb/fixtures/*.sh
+# This is intentional - this daemon shouldn't start
+chmod -x test/couchdb/fixtures/os_daemon_bad_perm.sh
 
 
 %build
@@ -167,13 +170,9 @@ rm -rf %{buildroot}%{_defaultdocdir}
 # Remove unneeded info-files
 rm -rf %{buildroot}%{_datadir}/info/
 
-# FIXME - this time CouchDB bundled a copy of etap which is heavily different
-# from the one we're shipping
-rm -rf %{buildroot}%{_libdir}/erlang/lib/etap/
-
 
 %check
-make check
+make check-eunit
 
 
 %pre
@@ -243,6 +242,9 @@ fi
 
 
 %changelog
+* Fri Aug 29 2014 Peter Lemenkov <lemenkov at gmail.com> - 1.6.0-13
+- Kill fragile etap tests in favor of eunit-based test-suite
+
 * Thu Aug 28 2014 Peter Lemenkov <lemenkov at gmail.com> - 1.6.0-12
 - Rebuild with Erlang 17.2.1
 
diff --git a/sources b/sources
index b7f46e1..576dd4c 100644
--- a/sources
+++ b/sources
@@ -1 +1,2 @@
 f986bbfe58051c3b186d520967237eea  apache-couchdb-1.6.0.tar.gz
+b590941d0eb4230317c119fa79fbfa87  couchdb-tests-blobs.tar


More information about the scm-commits mailing list