Posted to commits@solr.apache.org by ct...@apache.org on 2021/06/30 18:57:44 UTC

[solr] branch main updated: SOLR-14444: Ref Guide re-organization (#190)

This is an automated email from the ASF dual-hosted git repository.

ctargett pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/main by this push:
     new 73bb565  SOLR-14444: Ref Guide re-organization (#190)
73bb565 is described below

commit 73bb5654c525607462eb8e265993ada02cd01b6d
Author: Cassandra Targett <ct...@apache.org>
AuthorDate: Wed Jun 30 13:57:36 2021 -0500

    SOLR-14444: Ref Guide re-organization (#190)
---
 solr/solr-ref-guide/build.gradle                   |   9 +-
 solr/solr-ref-guide/src/_config.yml.template       |   5 +-
 solr/solr-ref-guide/src/_includes/head.html        |   2 -
 solr/solr-ref-guide/src/_includes/topnav.html      |  16 +-
 .../src/_templates/example.html.slim               |   3 -
 solr/solr-ref-guide/src/_templates/helpers.rb      |   4 +-
 solr/solr-ref-guide/src/_templates/pass.html.slim  |   1 -
 .../src/_templates/section.html.slim               |  13 -
 solr/solr-ref-guide/src/_templates/table.html.slim |   4 +-
 .../src/_templates/thematic_break.html.slim        |   1 -
 solr/solr-ref-guide/src/a-quick-overview.adoc      |  45 -
 solr/solr-ref-guide/src/about-filters.adoc         |  63 --
 solr/solr-ref-guide/src/about-this-guide.adoc      |   2 +-
 solr/solr-ref-guide/src/about-tokenizers.adoc      |  63 --
 ...lection-aliasing.adoc => alias-management.adoc} | 183 ++--
 solr/solr-ref-guide/src/aliases.adoc               | 362 ++++----
 solr/solr-ref-guide/src/analysis-screen.adoc       |  54 +-
 .../src/analytics-expression-sources.adoc          |   2 +-
 solr/solr-ref-guide/src/analytics.adoc             |   2 +-
 solr/solr-ref-guide/src/analyzers.adoc             |  53 +-
 solr/solr-ref-guide/src/audit-logging.adoc         | 183 +++-
 .../authentication-and-authorization-plugins.adoc  | 112 ++-
 ...-restoring-backups.adoc => backup-restore.adoc} | 174 +++-
 .../src/basic-authentication-plugin.adoc           |  90 +-
 .../src/block-join-query-parser.adoc               | 210 +++++
 solr/solr-ref-guide/src/caches-warming.adoc        | 326 ++++++++
 .../src/cert-authentication-plugin.adoc            |   7 +-
 solr/solr-ref-guide/src/charfilterfactories.adoc   |  29 +-
 .../src/choosing-an-output-format.adoc             |  23 -
 solr/solr-ref-guide/src/client-api-lineup.adoc     |  19 -
 solr/solr-ref-guide/src/client-apis.adoc           |  57 +-
 solr/solr-ref-guide/src/cloud-screens.adoc         |  28 +-
 .../src/cluster-node-management.adoc               | 107 ++-
 solr/solr-ref-guide/src/cluster-types.adoc         | 111 +++
 .../src/collapse-and-expand-results.adoc           |   5 +-
 solr/solr-ref-guide/src/collection-management.adoc | 379 +++++----
 .../src/collection-specific-tools.adoc             |  43 -
 solr/solr-ref-guide/src/collections-api.adoc       |   3 +-
 .../solr-ref-guide/src/collections-core-admin.adoc |   5 +-
 .../combining-distribution-and-replication.adoc    |  33 -
 .../src/commits-transaction-logs.adoc              | 321 ++++++++
 .../src/common-query-parameters.adoc               | 147 ++--
 solr/solr-ref-guide/src/config-api.adoc            |  86 +-
 solr/solr-ref-guide/src/config-sets.adoc           |  28 +-
 solr/solr-ref-guide/src/configsets-api.adoc        |  50 +-
 solr/solr-ref-guide/src/configuration-apis.adoc    |  27 +-
 solr/solr-ref-guide/src/configuration-files.adoc   | 107 +++
 solr/solr-ref-guide/src/configuration-guide.adoc   |  74 ++
 solr/solr-ref-guide/src/configuring-logging.adoc   |  95 ++-
 ...-of-solr-xml.adoc => configuring-solr-xml.adoc} | 119 ++-
 .../src/configuring-solrconfig-xml.adoc            | 204 +----
 solr/solr-ref-guide/src/content-streams.adoc       |  21 +-
 solr/solr-ref-guide/src/controlling-results.adoc   |  58 ++
 .../src/{copying-fields.adoc => copy-fields.adoc}  |  29 +-
 solr/solr-ref-guide/src/core-discovery.adoc        | 131 +++
 solr/solr-ref-guide/src/core-specific-tools.adoc   |  47 --
 solr/solr-ref-guide/src/coreadmin-api.adoc         | 191 +++--
 solr/solr-ref-guide/src/css/decoration.css         |  68 +-
 solr/solr-ref-guide/src/css/navs.css               |   4 +-
 solr/solr-ref-guide/src/css/ref-guide.css          |  58 +-
 ...e-rates.adoc => currencies-exchange-rates.adoc} |  41 +-
 ...g-with-dates.adoc => date-formatting-math.adoc} |  43 +-
 solr/solr-ref-guide/src/de-duplication.adoc        |  34 +-
 .../src/defining-core-properties.adoc              |  95 ---
 .../src/deployment-and-operations.adoc             |  40 -
 solr/solr-ref-guide/src/deployment-guide.adoc      | 100 +++
 ...-query-parser.adoc => dismax-query-parser.adoc} |  15 +-
 solr/solr-ref-guide/src/distributed-requests.adoc  | 216 -----
 .../distributed-search-with-index-sharding.adoc    | 178 ----
 ...{solr-tracing.adoc => distributed-tracing.adoc} |   9 +-
 solr/solr-ref-guide/src/docker-faq.adoc            |   2 +-
 solr/solr-ref-guide/src/document-analysis.adoc     |  61 ++
 ...t-documents.adoc => document-transformers.adoc} |  16 +-
 .../src/documents-fields-and-schema-design.adoc    |  44 -
 .../src/documents-fields-schema-design.adoc        |  99 +++
 solr/solr-ref-guide/src/documents-screen.adoc      |   8 +-
 solr/solr-ref-guide/src/docvalues.adoc             |  11 +-
 solr/solr-ref-guide/src/dynamic-fields.adoc        |  12 +-
 ...query-parser.adoc => edismax-query-parser.adoc} |  71 +-
 solr/solr-ref-guide/src/enabling-ssl.adoc          |  46 +-
 solr/solr-ref-guide/src/enhancing-queries.adoc     |  58 ++
 ...king-with-enum-fields.adoc => enum-fields.adoc} |  11 +-
 solr/solr-ref-guide/src/errata.adoc                |  25 -
 solr/solr-ref-guide/src/exporting-result-sets.adoc |  37 +-
 ...rocesses.adoc => external-files-processes.adoc} |  73 +-
 solr/solr-ref-guide/src/faceting.adoc              |  14 +-
 .../src/field-properties-by-use-case.adoc          |   4 +-
 .../src/field-type-definitions-and-properties.adoc |  43 +-
 .../src/field-types-included-with-solr.adoc        |  26 +-
 solr/solr-ref-guide/src/field-types.adoc           |  44 +
 .../src/fields-and-schema-design.adoc              |  42 +
 .../src/{defining-fields.adoc => fields.adoc}      |  26 +-
 solr/solr-ref-guide/src/files-screen.adoc          |  37 -
 .../src/{filter-descriptions.adoc => filters.adoc} |  64 +-
 solr/solr-ref-guide/src/function-queries.adoc      |  10 +-
 solr/solr-ref-guide/src/further-assistance.adoc    |  21 -
 solr/solr-ref-guide/src/getting-started.adoc       |  32 +-
 solr/solr-ref-guide/src/graph-traversal.adoc       |   2 +-
 solr/solr-ref-guide/src/graph.adoc                 | 112 ++-
 .../src/hadoop-authentication-plugin.adoc          | 121 ++-
 solr/solr-ref-guide/src/highlighting.adoc          |   6 +-
 solr/solr-ref-guide/src/how-solrcloud-works.adoc   |  54 --
 .../a-quick-overview/sample-client-app-arch.png    | Bin 52100 -> 0 bytes
 .../a-quick-overview/sample-client-app-arch.svg    | 488 -----------
 .../analysis_compare_0.png                         | Bin
 .../analysis_compare_1.png                         | Bin
 .../analysis_compare_2.png                         | Bin
 .../analysis_compare_3.png                         | Bin
 .../analysis_compare_4.png                         | Bin
 .../src/images/analysis-screen/analysis_normal.png | Bin 57653 -> 234822 bytes
 .../images/analysis-screen/analysis_verbose.png    | Bin 66742 -> 0 bytes
 .../distributed-replication.png                    | Bin 38896 -> 0 bytes
 .../files-screen.png                               | Bin
 .../level_menu.png                                 | Bin
 .../{logging => configuring-logging}/logging.png   | Bin
 .../query-request-tracing.png                      | Bin
 .../leader-follower-replication.png                | Bin 18224 -> 0 bytes
 .../segments_info.png                              | Bin
 .../sample-pdf-query.png                           | Bin
 .../SolrAdminDashboard.png                         | Bin
 .../dbvisualizer_solrjdbc_1.png                    | Bin
 .../dbvisualizer_solrjdbc_11.png                   | Bin
 .../dbvisualizer_solrjdbc_12.png                   | Bin
 .../dbvisualizer_solrjdbc_13.png                   | Bin
 .../dbvisualizer_solrjdbc_14.png                   | Bin
 .../dbvisualizer_solrjdbc_15.png                   | Bin
 .../dbvisualizer_solrjdbc_16.png                   | Bin
 .../dbvisualizer_solrjdbc_17.png                   | Bin
 .../dbvisualizer_solrjdbc_19.png                   | Bin
 .../dbvisualizer_solrjdbc_2.png                    | Bin
 .../dbvisualizer_solrjdbc_20.png                   | Bin
 .../dbvisualizer_solrjdbc_3.png                    | Bin
 .../dbvisualizer_solrjdbc_4.png                    | Bin
 .../dbvisualizer_solrjdbc_5.png                    | Bin
 .../dbvisualizer_solrjdbc_6.png                    | Bin
 .../dbvisualizer_solrjdbc_7.png                    | Bin
 .../dbvisualizer_solrjdbc_9.png                    | Bin
 .../squirrelsql_solrjdbc_1.png                     | Bin
 .../squirrelsql_solrjdbc_10.png                    | Bin
 .../squirrelsql_solrjdbc_11.png                    | Bin
 .../squirrelsql_solrjdbc_12.png                    | Bin
 .../squirrelsql_solrjdbc_13.png                    | Bin
 .../squirrelsql_solrjdbc_14.png                    | Bin
 .../squirrelsql_solrjdbc_15.png                    | Bin
 .../squirrelsql_solrjdbc_2.png                     | Bin
 .../squirrelsql_solrjdbc_3.png                     | Bin
 .../squirrelsql_solrjdbc_4.png                     | Bin
 .../squirrelsql_solrjdbc_5.png                     | Bin
 .../squirrelsql_solrjdbc_7.png                     | Bin
 .../squirrelsql_solrjdbc_9.png                     | Bin
 .../zeppelin_solrjdbc_1.png                        | Bin
 .../zeppelin_solrjdbc_2.png                        | Bin
 .../zeppelin_solrjdbc_3.png                        | Bin
 .../zeppelin_solrjdbc_4.png                        | Bin
 .../zeppelin_solrjdbc_5.png                        | Bin
 .../zeppelin_solrjdbc_6.png                        | Bin
 .../javaproperties.png                             | Bin
 .../grafana-solr-dashboard.png                     | Bin
 .../prometheus-solr-ping.png                       | Bin
 .../solr-exporter-diagram.png                      | Bin
 .../images/running-solr/solr34_responseHeader.png  | Bin 269440 -> 0 bytes
 .../cnet-faceting.png}                             | Bin
 .../search-process.png}                            | Bin
 .../Assistance.png                                 | Bin
 .../collection_dashboard.png                       | Bin
 .../core_dashboard.png                             | Bin
 .../dashboard.png                                  | Bin
 .../login.png                                      | Bin
 .../aws-key.png                                    | Bin
 .../aws-security-create.png                        | Bin
 .../aws-security-edit.png                          | Bin
 .../replication.png                                | Bin
 .../user-managed-replication.png                   | Bin 0 -> 16752 bytes
 .../src/implicit-requesthandlers.adoc              |  27 +-
 ...-solrconfig.adoc => index-location-format.adoc} |   6 +-
 solr/solr-ref-guide/src/index-replication.adoc     | 384 ---------
 .../solr-ref-guide/src/index-segments-merging.adoc | 331 ++++++++
 solr/solr-ref-guide/src/index.adoc                 | 113 +--
 .../src/indexconfig-in-solrconfig.adoc             | 242 ------
 .../src/indexing-and-basic-data-operations.adoc    |  56 --
 .../src/indexing-data-operations.adoc              |  55 ++
 .../src/indexing-nested-documents.adoc             |  59 +-
 ...ng-apache-tika.adoc => indexing-with-tika.adoc} |  11 +-
 ...ers.adoc => indexing-with-update-handlers.adoc} |  14 +-
 ...itparams-in-solrconfig.adoc => initparams.adoc} |  31 +-
 .../src/installation-deployment.adoc               |  45 +
 solr/solr-ref-guide/src/installing-solr.adoc       |   8 +-
 .../src/introduction-to-client-apis.adoc           |  29 -
 .../introduction-to-scaling-and-distribution.adoc  |  43 -
 solr/solr-ref-guide/src/introduction.adoc          |  43 +
 solr/solr-ref-guide/src/java-properties.adoc       |  22 -
 .../src/{using-javascript.adoc => javascript.adoc} |   9 +-
 solr/solr-ref-guide/src/jdbc-dbvisualizer.adoc     | 126 +++
 ...-python-jython.adoc => jdbc-python-jython.adoc} |   8 +-
 .../src/{solr-jdbc-r.adoc => jdbc-r.adoc}          |   2 +-
 ...r-jdbc-squirrel-sql.adoc => jdbc-squirrel.adoc} |  37 +-
 solr/solr-ref-guide/src/jdbc-zeppelin.adoc         |  91 ++
 ...using-jmx-with-solr.adoc => jmx-with-solr.adoc} |  19 +-
 solr/solr-ref-guide/src/join-query-parser.adoc     | 237 ++++++
 solr/solr-ref-guide/src/js/customscripts.js        |  20 +-
 solr/solr-ref-guide/src/json-facet-api.adoc        |   4 +-
 .../src/json-faceting-domain-changes.adoc          |   6 +-
 solr/solr-ref-guide/src/json-query-dsl.adoc        |   4 +-
 solr/solr-ref-guide/src/jvm-settings.adoc          |  16 +-
 .../src/jwt-authentication-plugin.adoc             |  42 +-
 .../src/kerberos-authentication-plugin.adoc        | 269 ++++--
 solr/solr-ref-guide/src/language-analysis.adoc     | 250 ++++--
 ...uring-indexing.adoc => language-detection.adoc} |  42 +-
 solr/solr-ref-guide/src/learning-to-rank.adoc      |   4 +-
 .../src/legacy-scaling-and-distribution.adoc       |  32 -
 solr/solr-ref-guide/src/libs.adoc                  |   2 +-
 .../src/local-parameters-in-queries.adoc           |  81 --
 solr/solr-ref-guide/src/local-params.adoc          |  89 ++
 solr/solr-ref-guide/src/logging.adoc               |  35 -
 solr/solr-ref-guide/src/logs.adoc                  |   3 +-
 ...to-solr-6.adoc => major-changes-in-solr-6.adoc} |  10 +-
 .../src/major-changes-in-solr-7.adoc               |  20 +-
 .../src/major-changes-in-solr-8.adoc               |  27 +-
 .../src/major-changes-in-solr-9.adoc               |   8 +-
 solr/solr-ref-guide/src/managed-resources.adoc     |  55 +-
 solr/solr-ref-guide/src/math-expressions.adoc      |  26 +-
 solr/solr-ref-guide/src/math-start.adoc            |  44 +-
 solr/solr-ref-guide/src/matrix-math.adoc           |   9 +-
 solr/solr-ref-guide/src/mbean-request-handler.adoc |  15 +-
 solr/solr-ref-guide/src/merging-indexes.adoc       |  47 --
 solr/solr-ref-guide/src/metrics-reporting.adoc     | 165 ++--
 solr/solr-ref-guide/src/monitoring-solr.adoc       |  59 +-
 ...=> monitoring-with-prometheus-and-grafana.adoc} | 125 +--
 solr/solr-ref-guide/src/morelikethis.adoc          |  20 +-
 .../src/near-real-time-searching.adoc              |  88 --
 solr/solr-ref-guide/src/numerical-analysis.adoc    | 209 ++---
 solr/solr-ref-guide/src/other-parsers.adoc         | 710 +++++-----------
 solr/solr-ref-guide/src/other-schema-elements.adoc |  95 ---
 ...view-of-documents-fields-and-schema-design.adoc |  61 --
 .../src/overview-of-searching-in-solr.adoc         |  59 --
 .../src/overview-of-the-solr-admin-ui.adoc         |  83 --
 solr/solr-ref-guide/src/pagination-of-results.adoc |  41 +-
 .../solr-ref-guide/src/parallel-sql-interface.adoc | 223 +++--
 solr/solr-ref-guide/src/parameter-reference.adoc   |  56 --
 ...ocuments.adoc => partial-document-updates.adoc} | 118 ++-
 .../src/performance-statistics-reference.adoc      |  14 +-
 solr/solr-ref-guide/src/phonetic-matching.adoc     |  42 +-
 solr/solr-ref-guide/src/ping.adoc                  |   2 +-
 solr/solr-ref-guide/src/post-tool.adoc             |   2 +-
 solr/solr-ref-guide/src/property-substitution.adoc | 146 ++++
 .../src/putting-the-pieces-together.adoc           |  73 --
 .../src/{using-python.adoc => python.adoc}         |  11 +-
 ...mponent.adoc => query-elevation-component.adoc} |  37 +-
 solr/solr-ref-guide/src/query-guide.adoc           |  72 ++
 solr/solr-ref-guide/src/query-re-ranking.adoc      |  28 +-
 solr/solr-ref-guide/src/query-screen.adoc          |  10 +-
 .../src/query-settings-in-solrconfig.adoc          | 274 ------
 .../src/query-syntax-and-parsers.adoc              |  65 ++
 .../src/query-syntax-and-parsing.adoc              |  31 -
 solr/solr-ref-guide/src/realtime-get.adoc          |  11 +-
 solr/solr-ref-guide/src/reindexing.adoc            |   4 +-
 solr/solr-ref-guide/src/replica-management.adoc    | 137 ++-
 solr/solr-ref-guide/src/replication-screen.adoc    |  33 -
 .../solr-ref-guide/src/request-parameters-api.adoc |  48 +-
 ...r-in-solrconfig.adoc => requestdispatcher.adoc} | 111 ++-
 ....adoc => requesthandlers-searchcomponents.adoc} |  46 +-
 solr/solr-ref-guide/src/resource-loading.adoc      |  12 +-
 solr/solr-ref-guide/src/response-writers.adoc      |  70 +-
 solr/solr-ref-guide/src/result-clustering.adoc     | 123 ++-
 solr/solr-ref-guide/src/result-grouping.adoc       |  72 +-
 .../src/{using-solr-from-ruby.adoc => ruby.adoc}   |   5 +-
 .../src/rule-based-authorization-plugin.adoc       | 360 ++++++--
 solr/solr-ref-guide/src/running-your-analyzer.adoc |  56 --
 solr/solr-ref-guide/src/scaling-solr.adoc          |  49 ++
 solr/solr-ref-guide/src/schema-api.adoc            | 205 +++--
 solr/solr-ref-guide/src/schema-elements.adoc       | 158 ++++
 .../schema-factory-definition-in-solrconfig.adoc   |  91 --
 solr/solr-ref-guide/src/schema-factory.adoc        | 121 +++
 solr/solr-ref-guide/src/schema-indexing-guide.adoc |  63 ++
 solr/solr-ref-guide/src/schemaless-mode.adoc       |  12 +-
 solr/solr-ref-guide/src/searching-in-solr.adoc     |  89 ++
 .../src/searching-nested-documents.adoc            |  56 +-
 solr/solr-ref-guide/src/searching.adoc             |  88 --
 solr/solr-ref-guide/src/securing-solr.adoc         |  70 +-
 solr/solr-ref-guide/src/segments-info.adoc         |  23 -
 solr/solr-ref-guide/src/shard-management.adoc      |   4 +-
 solr/solr-ref-guide/src/solr-admin-ui.adoc         | 167 ++++
 solr/solr-ref-guide/src/solr-concepts.adoc         |  40 +
 .../src/solr-configuration-files.adoc              |  78 --
 .../src/solr-control-script-reference.adoc         | 314 ++++---
 .../src/solr-cores-and-solr-xml.adoc               |  36 -
 solr/solr-ref-guide/src/solr-field-types.adoc      |  38 -
 solr/solr-ref-guide/src/solr-glossary.adoc         |  83 +-
 ...ing-solr-in-docker.adoc => solr-in-docker.adoc} |   5 +-
 ...on-to-solr-indexing.adoc => solr-indexing.adoc} |  25 +-
 .../src/solr-jdbc-apache-zeppelin.adoc             |  79 --
 .../solr-ref-guide/src/solr-jdbc-dbvisualizer.adoc | 121 ---
 ...running-solr-on-hdfs.adoc => solr-on-hdfs.adoc} |  75 +-
 solr/solr-ref-guide/src/solr-plugins.adoc          |  26 +-
 solr/solr-ref-guide/src/solr-schema.adoc           |  40 +
 solr/solr-ref-guide/src/solr-tutorial.adoc         | 917 +--------------------
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc    |  36 +-
 solr/solr-ref-guide/src/solrcloud-clusters.adoc    |  67 ++
 .../solrcloud-configuration-and-parameters.adoc    |  32 -
 .../src/solrcloud-distributed-requests.adoc        | 454 ++++++++++
 ...solrcloud-query-routing-and-read-tolerance.adoc | 134 ---
 .../solrcloud-recoveries-and-write-tolerance.adoc  |   2 +-
 solr/solr-ref-guide/src/solrcloud-resilience.adoc  |  26 -
 ...lrcloud.adoc => solrcloud-shards-indexing.adoc} |   4 +-
 .../solrcloud-with-legacy-configuration-files.adoc |   2 +-
 solr/solr-ref-guide/src/solrcloud.adoc             |  45 -
 .../src/{using-solrj.adoc => solrj.adoc}           |  85 +-
 solr/solr-ref-guide/src/spatial-search.adoc        | 199 +++--
 solr/solr-ref-guide/src/spell-checking.adoc        |   8 +-
 ...uery-parser.adoc => standard-query-parser.adoc} |  26 +-
 ...e-stats-component.adoc => stats-component.adoc} |  60 +-
 solr/solr-ref-guide/src/stream-api.adoc            |  23 +-
 .../src/stream-decorator-reference.adoc            | 289 +++++--
 .../src/stream-evaluator-reference.adoc            | 404 +++++----
 solr/solr-ref-guide/src/streaming-expressions.adoc |  30 +-
 solr/solr-ref-guide/src/suggester.adoc             |   6 +-
 ...-requirements.adoc => system-requirements.adoc} |  38 +-
 ...the-tagger-handler.adoc => tagger-handler.adoc} |   4 +-
 .../src/taking-solr-to-production.adoc             | 187 +++--
 ...r-component.adoc => term-vector-component.adoc} |   2 +-
 ...e-terms-component.adoc => terms-component.adoc} |   4 +-
 .../src/the-well-configured-solr-instance.adoc     |  48 --
 solr/solr-ref-guide/src/thread-dump.adoc           |  11 +-
 solr/solr-ref-guide/src/tokenizers.adoc            |  27 +-
 .../src/transforming-and-indexing-custom-json.adoc |  20 +-
 ...s-solrcloud-tutorial.adoc => tutorial-aws.adoc} |  82 +-
 solr/solr-ref-guide/src/tutorial-diy.adoc          | 107 +++
 solr/solr-ref-guide/src/tutorial-films.adoc        | 458 ++++++++++
 ...with-solrcloud.adoc => tutorial-solrcloud.adoc} |   9 +-
 solr/solr-ref-guide/src/tutorial-techproducts.adoc | 507 ++++++++++++
 ...rstanding-analyzers-tokenizers-and-filters.adoc |  54 --
 .../src/update-request-processors.adoc             | 173 ++--
 .../src/updatehandlers-in-solrconfig.adoc          | 157 ----
 .../src/upgrading-a-solr-cluster.adoc              |  39 +-
 ...ing-started.adoc => user-managed-clusters.adoc} |  26 +-
 .../src/user-managed-distributed-search.adoc       | 117 +++
 .../src/user-managed-index-replication.adoc        | 519 ++++++++++++
 ...ing-the-solr-administration-user-interface.adoc |  45 -
 solr/solr-ref-guide/src/v2-api.adoc                |  12 +-
 solr/solr-ref-guide/src/variables.adoc             |  22 +-
 solr/solr-ref-guide/src/vector-math.adoc           |  20 +-
 .../src/zookeeper-access-control.adoc              | 111 ++-
 ...eeper-ensemble.adoc => zookeeper-ensemble.adoc} | 101 ++-
 ...n-files.adoc => zookeeper-file-management.adoc} |   6 +-
 ...ine-utilities.adoc => zookeeper-utilities.adoc} |   6 +-
 345 files changed, 12807 insertions(+), 9512 deletions(-)

diff --git a/solr/solr-ref-guide/build.gradle b/solr/solr-ref-guide/build.gradle
index 00b0e98..67f19c6 100644
--- a/solr/solr-ref-guide/build.gradle
+++ b/solr/solr-ref-guide/build.gradle
@@ -22,7 +22,7 @@ buildscript {
     }
 
     dependencies {
-        classpath "org.asciidoctor:asciidoctorj:1.6.2"
+        classpath "org.asciidoctor:asciidoctorj:2.5.1"
     }
 }
 
@@ -68,14 +68,14 @@ dependencies {
     depVer('org.apache.zookeeper:zookeeper')
 
     // jekyll dependencies
-    gems 'rubygems:jekyll:3.5.2'
+    gems 'rubygems:jekyll:4.2.0'
     gems 'rubygems:jekyll-asciidoc:3.0.0'
 
     // don't know why we have to explicitly add these deps but it doesn't resolve them
     // automatically.
     gems 'rubygems:tilt:2.0.10'
-    gems 'rubygems:slim:4.0.1'
-    gems 'rubygems:concurrent-ruby:1.0.5'
+    gems 'rubygems:slim:4.1.0'
+    gems 'rubygems:concurrent-ruby:1.1.9'
 }
 
 sourceSets {
@@ -156,6 +156,7 @@ ext {
         def dependencyProps = [
             ["ivyCommonsCodec", "commons-codec", "commons-codec"],
             ["ivyDropwizardMetrics", "io.dropwizard.metrics", "metrics-core"],
+            ["ivyHadoop", "org.apache.hadoop", "hadoop-auth"],
             ["ivyLog4j", "org.apache.logging.log4j", "log4j-core"],
             ["ivyOpennlpTools", "org.apache.opennlp", "opennlp-tools"],
             ["ivyTika", "org.apache.tika", "tika-core"],
diff --git a/solr/solr-ref-guide/src/_config.yml.template b/solr/solr-ref-guide/src/_config.yml.template
index 88016fe..45c80d2 100755
--- a/solr/solr-ref-guide/src/_config.yml.template
+++ b/solr/solr-ref-guide/src/_config.yml.template
@@ -79,6 +79,7 @@ solr-attributes: &solr-attributes-ref
   build-year: '${buildYear}'
   ivy-commons-codec-version: '${ivyCommonsCodec}'
   ivy-dropwizard-version: '${ivyDropwizardMetrics}'
+  ivy-hadoop-version: '${ivyHadoop}'
   ivy-log4j-version: '${ivyLog4j}'
   ivy-opennlp-version: '${ivyOpennlpTools}'
   ivy-tika-version: '${ivyTika}'
@@ -92,5 +93,7 @@ asciidoctor:
     attribute-missing: "warn"
     icons: "font"
     source-highlighter: "rouge"
-    rouge-theme: "thankful-eyes"
+    rouge-css: "style"
+    rouge-style: "github"
     stem:
+    sectanchors: "true"
diff --git a/solr/solr-ref-guide/src/_includes/head.html b/solr/solr-ref-guide/src/_includes/head.html
index 023328b..c92db6d 100755
--- a/solr/solr-ref-guide/src/_includes/head.html
+++ b/solr/solr-ref-guide/src/_includes/head.html
@@ -22,8 +22,6 @@
 
 <script src="{{ "js/jquery.navgoco.min.js" }}"></script>
 
-<script src="https://cdnjs.cloudflare.com/ajax/libs/anchor-js/4.2.0/anchor.min.js" crossorigin="anonymous"></script>
-
 <link rel="shortcut icon" href="{{ "images/icons/favicon.ico"  }}">
 
 <!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
diff --git a/solr/solr-ref-guide/src/_includes/topnav.html b/solr/solr-ref-guide/src/_includes/topnav.html
index fa868a0..d114728 100755
--- a/solr/solr-ref-guide/src/_includes/topnav.html
+++ b/solr/solr-ref-guide/src/_includes/topnav.html
@@ -11,22 +11,24 @@
         <div class="collapse navbar-collapse justify-content-end" id="bs-example-navbar-collapse-1">
             <ul class="nav navbar-nav navbar-right">
                 <!-- Link to Solr website -->
-                <li><a href="https://solr.apache.org/" target="_blank">Solr Website</a></li>
+                <li><a href="https://solr.apache.org/" target="_blank" rel="noreferrer nopener">Solr Website</a></li>
+                <li><a href="https://solr.apache.org/downloads.html" target="_blank" rel="noreferrer nopener">Download Solr</a></li>
                 <!-- Other Guide Formats dropdown -->
                 <li class="dropdown">
-                    <a href="#" class="dropdown-toggle" data-toggle="dropdown">Other Formats<b class="caret"></b></a>
+                    <a href="#" class="dropdown-toggle" data-toggle="dropdown">Other Versions<b class="caret"></b></a>
                     <ul class="dropdown-menu">
-                       <li><a href="https://archive.apache.org/dist/lucene/solr/ref-guide/" target="_blank">Archived PDFs</a></li>
-                       <li><a href="https://solr.apache.org/guide/" target="_blank">Other Versions Online</a></li>
+                       <li><a href="https://archive.apache.org/dist/lucene/solr/ref-guide/" target="_blank" rel="noreferrer nopener">Archived PDFs</a></li>
+                       <li><a href="https://solr.apache.org/guide/" target="_blank" rel="noreferrer nopener">Other Versions Online</a></li>
                     </ul>
                 </li>
                 <!-- Solr Resources dropdown -->
                 <li class="dropdown">
                     <a href="#" class="dropdown-toggle" data-toggle="dropdown">Solr Resources<b class="caret"></b></a>
                     <ul class="dropdown-menu">
-                       <li><a href="{{site.solr-attributes.solr-javadocs}}/solr-core/index.html" target="_blank">Solr Javadocs</a></li>
-                       <li><a href="https://solr.apache.org/community.html#version-control" target="_blank">Lucene/Solr Source Code</a></li>
-                       <li><a href="https://solr.apache.org/community.html" target="_blank">Solr Community Links</a></li>
+                       <li><a href="{{site.solr-attributes.solr-javadocs}}/solr-core/index.html" target="_blank" rel="noreferrer nopener">Javadocs</a></li>
+                       <li><a href="https://solr.apache.org/community.html#version-control" target="_blank" rel="noreferrer nopener">Source Code</a></li>
+                       <li><a href="https://solr.apache.org/community.html" target="_blank" rel="noreferrer nopener">Community Links</a></li>
+                       <li><a href="https://github.com/apache/solr/tree/main/dev-docs/ref-guide" target="_blank" rel="noreferrer nopener">Contribute</a></li>
                     </ul>
                 </li>
 
diff --git a/solr/solr-ref-guide/src/_templates/example.html.slim b/solr/solr-ref-guide/src/_templates/example.html.slim
deleted file mode 100644
index 1514e63..0000000
--- a/solr/solr-ref-guide/src/_templates/example.html.slim
+++ /dev/null
@@ -1,3 +0,0 @@
-= block_with_caption :top, :class=>'exampleblock'
-  .example
-    =content
diff --git a/solr/solr-ref-guide/src/_templates/helpers.rb b/solr/solr-ref-guide/src/_templates/helpers.rb
index 07163db..0f4a7c4 100644
--- a/solr/solr-ref-guide/src/_templates/helpers.rb
+++ b/solr/solr-ref-guide/src/_templates/helpers.rb
@@ -472,9 +472,9 @@ is book and it's a child of a book part. Excluding block content."
     option? :autowidth
   end
 
-  def spread?
+  def stretch?
     if !autowidth? || local_attr?('width')
-      'spread' if attr? :tablepcwidth, 100
+      'stretch' if attr? :tablepcwidth, 100
     end
   end
 
diff --git a/solr/solr-ref-guide/src/_templates/pass.html.slim b/solr/solr-ref-guide/src/_templates/pass.html.slim
deleted file mode 100644
index 7125c22..0000000
--- a/solr/solr-ref-guide/src/_templates/pass.html.slim
+++ /dev/null
@@ -1 +0,0 @@
-=content
diff --git a/solr/solr-ref-guide/src/_templates/section.html.slim b/solr/solr-ref-guide/src/_templates/section.html.slim
deleted file mode 100644
index b8776f5..0000000
--- a/solr/solr-ref-guide/src/_templates/section.html.slim
+++ /dev/null
@@ -1,13 +0,0 @@
-- sect0 = section_level == 0
-= html_tag_if !sect0, :section, class: [%(sect#{section_level}), role]
-  *{tag: %(h#{section_level + 1}), id: id, class: ('sect0' if sect0)}
-    - if id
-      - if document.attr? :sectanchors
-        a.anchor href="##{id}" aria-hidden='true'
-      - if document.attr? :sectlinks
-        a.link href="##{id}" =section_title
-      - else
-        =section_title
-    - else
-      =section_title
-  =content
diff --git a/solr/solr-ref-guide/src/_templates/table.html.slim b/solr/solr-ref-guide/src/_templates/table.html.slim
index b66bf5b..7f0f8e4 100644
--- a/solr/solr-ref-guide/src/_templates/table.html.slim
+++ b/solr/solr-ref-guide/src/_templates/table.html.slim
@@ -1,7 +1,7 @@
 = block_with_caption :top, :class=>'tableblock'
   table [
-      class=["tableblock", "frame-#{attr :frame, 'all'}", "grid-#{attr :grid, 'all'}", spread?]
-      style=style_value(width: ("#{attr :tablepcwidth}%" if !autowidth? && !spread? || (local_attr :width)),
+      class=["tableblock", "frame-#{attr :frame, 'all'}", "grid-#{attr :grid, 'all'}", "stripes-#{attr :stripes, 'even'}", stretch?]
+      style=style_value(width: ("#{attr :tablepcwidth}%" if !autowidth? && !stretch? || (local_attr :width)),
                         float: (attr :float)) ]
     - unless (attr :rowcount).zero?
       colgroup
diff --git a/solr/solr-ref-guide/src/_templates/thematic_break.html.slim b/solr/solr-ref-guide/src/_templates/thematic_break.html.slim
deleted file mode 100644
index fcb5e9c..0000000
--- a/solr/solr-ref-guide/src/_templates/thematic_break.html.slim
+++ /dev/null
@@ -1 +0,0 @@
-hr
diff --git a/solr/solr-ref-guide/src/a-quick-overview.adoc b/solr/solr-ref-guide/src/a-quick-overview.adoc
deleted file mode 100644
index 0542cb3..0000000
--- a/solr/solr-ref-guide/src/a-quick-overview.adoc
+++ /dev/null
@@ -1,45 +0,0 @@
-= A Quick Overview
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-Solr is a search server built on top of Apache Lucene, an open source, Java-based, information retrieval library. It is designed to drive powerful document retrieval applications - wherever you need to serve data to users based on their queries, Solr can work for you.
-
-Here is a example of how Solr could integrate with an application:
-
-.Solr integration with applications
-image::images/a-quick-overview/sample-client-app-arch.png[image,width=500,height=379]
-
-In the scenario above, Solr runs alongside other server applications. For example, an online store application would provide a user interface, a shopping cart, and a way to make purchases for end users; while an inventory management application would allow store employees to edit product information. The product metadata would be kept in some kind of database, as well as in Solr.
-
-Solr makes it easy to add the capability to search through the online store through the following steps:
-
-. Define a _schema_. The schema tells Solr about the contents of documents it will be indexing. In the online store example, the schema would define fields for the product name, description, price, manufacturer, and so on. Solr's schema is powerful and flexible and allows you to tailor Solr's behavior to your application. See <<documents-fields-and-schema-design.adoc#,Documents, Fields, and Schema Design>> for all the details.
-. Feed Solr documents for which your users will search.
-. Expose search functionality in your application.
-
-Because Solr is based on open standards, it is highly extensible. Solr queries are simple HTTP request URLs and the response is a structured document: mainly JSON, but it could also be XML, CSV, or other formats. This means that a wide variety of clients will be able to use Solr, from other web applications to browser clients, rich client applications, and mobile devices. Any platform capable of HTTP can talk to Solr. See <<client-apis.adoc#,Client APIs>> for details on client APIs.
-
-Solr offers support for the simplest keyword searching through to complex queries on multiple fields and faceted search results. <<searching.adoc#,Searching>> has more information about searching and queries.
-
-If Solr's capabilities are not impressive enough, its ability to handle very high-volume applications should do the trick.
-
-A relatively common scenario is that you have so much data, or so many queries, that a single Solr server is unable to handle your entire workload. In this case, you can scale up the capabilities of your application using <<solrcloud.adoc#,SolrCloud>> to better distribute the data, and the processing of requests, across many servers. Multiple options can be mixed and matched depending on the scalability you need.
-
-For example: "Sharding" is a scaling technique in which a collection is split into multiple logical pieces called "shards" in order to scale up the number of documents in a collection beyond what could physically fit on a single server. Incoming queries are distributed to every shard in the collection, which respond with merged results. Another technique available is to increase the "Replication Factor" of your collection, which allows you to add servers with additional copies of your co [...]
-
-Best of all, this talk about high-volume applications is not just hypothetical: some of the famous Internet sites that use Solr today are Macy's, EBay, and Zappo's.
-For more examples, take a look at https://cwiki.apache.org/confluence/display/solr/PublicServers.
diff --git a/solr/solr-ref-guide/src/about-filters.adoc b/solr/solr-ref-guide/src/about-filters.adoc
deleted file mode 100644
index 900707e..0000000
--- a/solr/solr-ref-guide/src/about-filters.adoc
+++ /dev/null
@@ -1,63 +0,0 @@
-= About Filters
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-Like <<tokenizers.adoc#,tokenizers>>, <<filter-descriptions.adoc#,filters>> consume input and produce a stream of tokens. Filters also derive from `org.apache.lucene.analysis.TokenStream`. Unlike tokenizers, a filter's input is another TokenStream. The job of a filter is usually easier than that of a tokenizer since in most cases a filter looks at each token in the stream sequentially and decides whether to pass it along, replace it or discard it.
-
-A filter may also do more complex analysis by looking ahead to consider multiple tokens at once, although this is less common. One hypothetical use for such a filter might be to normalize state names that would be tokenized as two words. For example, the single token "california" would be replaced with "CA", while the token pair "rhode" followed by "island" would become the single token "RI".
-
-Because filters consume one `TokenStream` and produce a new `TokenStream`, they can be chained one after another indefinitely. Each filter in the chain in turn processes the tokens produced by its predecessor. The order in which you specify the filters is therefore significant. Typically, the most general filtering is done first, and later filtering stages are more specialized.
-
-[.dynamic-tabs]
---
-[example.tab-pane#byname-filterexample]
-====
-[.tab-label]*With name*
-[source,xml]
-----
-<fieldType name="text" class="solr.TextField">
-  <analyzer>
-    <tokenizer name="standard"/>
-    <filter name="lowercase"/>
-    <filter name="englishPorter"/>
-  </analyzer>
-</fieldType>
-----
-====
-[example.tab-pane#byclass-filterexample]
-====
-[.tab-label]*With class name (legacy)*
-[source,xml]
-----
-<fieldType name="text" class="solr.TextField">
-  <analyzer>
-    <tokenizer class="solr.StandardTokenizerFactory"/>
-    <filter class="solr.LowerCaseFilterFactory"/>
-    <filter class="solr.EnglishPorterFilterFactory"/>
-  </analyzer>
-</fieldType>
-----
-====
---
-
-This example starts with Solr's standard tokenizer, which breaks the field's text into tokens. All the tokens are then set to lowercase, which will facilitate case-insensitive matching at query time.
-
-The last filter in the above example is a stemmer filter that uses the Porter stemming algorithm. A stemmer is basically a set of mapping rules that maps the various forms of a word back to the base, or _stem_, word from which they derive. For example, in English the words "hugs", "hugging" and "hugged" are all forms of the stem word "hug". The stemmer will replace all of these terms with "hug", which is what will be indexed. This means that a query for "hug" will match the term "hugged" [...]
-
-Conversely, applying a stemmer to your query terms will allow queries containing non stem terms, like "hugging", to match documents with different variations of the same stem word, such as "hugged". This works because both the indexer and the query will map to the same stem ("hug").
-
-Word stemming is, obviously, very language specific. Solr includes several language-specific stemmers created by the http://snowball.tartarus.org/[Snowball] generator that are based on the Porter stemming algorithm. The generic Snowball Porter Stemmer Filter can be used to configure any of these language stemmers. Solr also includes a convenience wrapper for the English Snowball stemmer. There are also several purpose-built stemmers for non-English languages. These stemmers are described [...]
diff --git a/solr/solr-ref-guide/src/about-this-guide.adoc b/solr/solr-ref-guide/src/about-this-guide.adoc
index 0eff937..4f6a71a 100644
--- a/solr/solr-ref-guide/src/about-this-guide.adoc
+++ b/solr/solr-ref-guide/src/about-this-guide.adoc
@@ -48,7 +48,7 @@ In many cases, this is is in the `server/solr` directory of your installation. H
 
 In several cases of this Guide, our examples are built from the the "techproducts" example (i.e., you have started Solr with the command `bin/solr -e techproducts`). In this case, `solr.home` will be a sub-directory of the `example/` directory created for you automatically.
 
-See also the section <<solr-configuration-files.adoc#solr-home,Solr Home>> for further details on what is contained in this directory.
+See also the section <<configuration-files.adoc#solr-home,Solr Home>> for further details on what is contained in this directory.
 
 == API Examples
 
diff --git a/solr/solr-ref-guide/src/about-tokenizers.adoc b/solr/solr-ref-guide/src/about-tokenizers.adoc
deleted file mode 100644
index b2d0c3b..0000000
--- a/solr/solr-ref-guide/src/about-tokenizers.adoc
+++ /dev/null
@@ -1,63 +0,0 @@
-= About Tokenizers
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-The job of a <<tokenizers.adoc#,tokenizer>> is to break up a stream of text into tokens, where each token is (usually) a sub-sequence of the characters in the text. An analyzer is aware of the field it is configured for, but a tokenizer is not. Tokenizers read from a character stream (a Reader) and produce a sequence of Token objects (a TokenStream).
-
-Characters in the input stream may be discarded, such as whitespace or other delimiters. They may also be added to or replaced, such as mapping aliases or abbreviations to normalized forms. A token contains various metadata in addition to its text value, such as the location at which the token occurs in the field. Because a tokenizer may produce tokens that diverge from the input text, you should not assume that the text of the token is the same text that occurs in the field, or that its [...]
-
-[.dynamic-tabs]
---
-[example.tab-pane#byname-tok]
-====
-[.tab-label]*With name*
-[source,xml]
-----
-<fieldType name="text" class="solr.TextField">
-  <analyzer>
-    <tokenizer name="standard"/>
-  </analyzer>
-</fieldType>
-----
-====
-[example.tab-pane#byclass-tok]
-====
-[.tab-label]*With class name (legacy)*
-[source,xml]
-----
-<fieldType name="text" class="solr.TextField">
-  <analyzer>
-    <tokenizer class="solr.StandardTokenizerFactory"/>
-  </analyzer>
-</fieldType>
-----
-====
---
-
-The class named in the tokenizer element is not the actual tokenizer, but rather a class that implements the `TokenizerFactory` API. This factory class will be called upon to create new tokenizer instances as needed. Objects created by the factory must derive from `Tokenizer`, which indicates that they produce sequences of tokens. If the tokenizer produces tokens that are usable as is, it may be the only component of the analyzer. Otherwise, the tokenizer's output tokens will serve as in [...]
-
-A `TypeTokenFilterFactory` is available that creates a `TypeTokenFilter` that filters tokens based on their TypeAttribute, which is set in `factory.getStopTypes`.
-
-For a complete list of the available TokenFilters, see the section <<tokenizers.adoc#,Tokenizers>>.
-
-== When to Use a CharFilter vs. a TokenFilter
-
-There are several pairs of CharFilters and TokenFilters that have related (i.e., `MappingCharFilter` and `ASCIIFoldingFilter`) or nearly identical (i.e., `PatternReplaceCharFilterFactory` and `PatternReplaceFilterFactory`) functionality and it may not always be obvious which is the best choice.
-
-The decision about which to use depends largely on which Tokenizer you are using, and whether you need to preprocess the stream of characters.
-
-For example, suppose you have a tokenizer such as `StandardTokenizer` and although you are pretty happy with how it works overall, you want to customize how some specific characters behave. You could modify the rules and re-build your own tokenizer with JFlex, but it might be easier to simply map some of the characters before tokenization with a `CharFilter`.
diff --git a/solr/solr-ref-guide/src/collection-aliasing.adoc b/solr/solr-ref-guide/src/alias-management.adoc
similarity index 63%
rename from solr/solr-ref-guide/src/collection-aliasing.adoc
rename to solr/solr-ref-guide/src/alias-management.adoc
index c019fc7..18f5658 100644
--- a/solr/solr-ref-guide/src/collection-aliasing.adoc
+++ b/solr/solr-ref-guide/src/alias-management.adoc
@@ -1,4 +1,4 @@
-= Collection Aliasing
+= Alias Management
 :toclevels: 1
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
@@ -17,69 +17,68 @@
 // specific language governing permissions and limitations
 // under the License.
 
-A collection alias is a virtual collection which Solr treats the same as a normal collection. The alias collection may point to one or more real collections.
+A collection alias is a virtual collection which Solr treats the same as a normal collection.
+The alias collection may point to one or more real collections.
 
 Some use cases for collection aliasing:
 
 * Time series data
 * Reindexing content behind the scenes
 
+For an overview of aliases in Solr, see the section <<aliases.adoc#,Aliases>>.
+
 [[createalias]]
 == CREATEALIAS: Create or Modify an Alias for a Collection
 
 The `CREATEALIAS` action will create a new alias pointing to one or more collections.
 Aliases come in 2 flavors: standard and routed.
 
-*Standard aliases* are simple:  CREATEALIAS registers the alias name with the names of one or more collections provided
-  by the command.
+*Standard aliases* are simple: `CREATEALIAS` registers the alias name with the names of one or more collections provided by the command.
 If an existing alias exists, it is replaced/updated.
-A standard alias can serve as a means to rename a collection, and can be used to atomically swap
-which backing/underlying collection is "live" for various purposes.
-When Solr searches an alias pointing to multiple collections, Solr will search all shards of all the collections as an
-  aggregated whole.
-While it is possible to send updates to an alias spanning multiple collections, standard aliases have no logic for
-  distributing documents among the referenced collections so all updates will go to the first collection in the list.
+
+A standard alias can serve as a means to rename a collection, and can be used to atomically swap which backing/underlying collection is "live" for various purposes.
+
+When Solr searches an alias pointing to multiple collections, Solr will search all shards of all the collections as an aggregated whole.
+While it is possible to send updates to an alias spanning multiple collections, standard aliases have no logic for distributing documents among the referenced collections so all updates will go to the first collection in the list.
 
 `/admin/collections?action=CREATEALIAS&name=_name_&collections=_collectionlist_`
 
-*Routed aliases* are aliases with additional capabilities to act as a kind of super-collection that route
-  updates to the correct collection. Routing is data driven and may be based on a temporal field or on categories
-  specified in a field (normally string based).
-See <<aliases.adoc#routed-aliases,Routed Aliases>> for some important high-level information
-  before getting started.
+*Routed aliases* are aliases with additional capabilities to act as a kind of super-collection that route updates to the correct collection.
+
+Routing is data driven and may be based on a temporal field or on categories   specified in a field (normally string based).
+See <<aliases.adoc#routed-aliases,Routed Aliases>> for some important high-level information before getting started.
 
 [source,text]
 ----
-localhost:8983/solr/admin/collections?action=CREATEALIAS&name=timedata&router.start=NOW/DAY&router.field=evt_dt&router.name=time&router.interval=%2B1DAY&router.maxFutureMs=3600000&create-collection.collection.configName=myConfig&create-collection.numShards=2
+$ http://localhost:8983/solr/admin/collections?action=CREATEALIAS&name=timedata&router.start=NOW/DAY&router.field=evt_dt&router.name=time&router.interval=%2B1DAY&router.maxFutureMs=3600000&create-collection.collection.configName=myConfig&create-collection.numShards=2
 ----
 
-If run on Jan 15, 2018, the above will create an time routed alias named timedata, that contains collections with names prefixed
-with `timedata` and an initial collection named `timedata_2018_01_15` will be created immediately. Updates sent to this
-alias with a (required) value in `evt_dt` that is before or after 2018-01-15 will be rejected, until the last 60
-minutes of 2018-01-15. After 2018-01-15T23:00:00 documents for either 2018-01-15 or 2018-01-16 will be accepted.
-As soon as the system receives a document for an allowable time window for which there is no collection it will
-automatically create the next required collection (and potentially any intervening collections if `router.interval` is
-smaller than `router.maxFutureMs`). Both the initial collection and any subsequent collections will be created using
-the specified configset. All collection creation parameters other than `name` are allowed, prefixed
+If run on Jan 15, 2018, the above will create an time routed alias named timedata, that contains collections with names prefixed with `timedata` and an initial collection named `timedata_2018_01_15` will be created immediately.
+Updates sent to this alias with a (required) value in `evt_dt` that is before or after 2018-01-15 will be rejected, until the last 60 minutes of 2018-01-15.
+After 2018-01-15T23:00:00 documents for either 2018-01-15 or 2018-01-16 will be accepted.
+As soon as the system receives a document for an allowable time window for which there is no collection it will automatically create the next required collection (and potentially any intervening collections if `router.interval` is
+smaller than `router.maxFutureMs`).
+Both the initial collection and any subsequent collections will be created using
+the specified configset.
+All collection creation parameters other than `name` are allowed, prefixed
 by `create-collection.`
 
-This means that one could, for example, partition their collections by day, and within each daily collection route
-the data to shards based on customer id. Such shards can be of any type (NRT, PULL or TLOG), and rule-based replica
+This means that one could, for example, partition their collections by day, and within each daily collection route the data to shards based on customer id.
+Such shards can be of any type (NRT, PULL or TLOG), and rule-based replica
 placement strategies may also be used.
 
 The values supplied in this command for collection creation will be retained
 in alias properties, and can be verified by inspecting `aliases.json` in ZooKeeper.
 
-NOTE: Presently only updates are routed and queries are distributed to all collections in the alias, but future
-features may enable routing of the query to the single appropriate collection based on a special parameter or perhaps
-a filter on the routed field.
+NOTE: Only updates are routed and queries are distributed to all collections in the alias.
 
 === CREATEALIAS Parameters
 
 `name`::
-The alias name to be created. This parameter is required. If the alias is to be routed it also functions
-as a prefix for the names of the dependent collections that will be created. It must therefore adhere to normal
-requirements for collection naming.
+The alias name to be created.
+This parameter is required.
+If the alias is to be routed it also functions as a prefix for the names of the dependent collections that will be created.
+It must therefore adhere to normal requirements for collection naming.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
@@ -87,85 +86,84 @@ Request ID to track this action which will be <<collections-api.adoc#asynchronou
 ==== Standard Alias Parameters
 
 `collections`::
-A comma-separated list of collections to be aliased. The collections must already exist in the cluster.
-This parameter signals the creation of a standard alias. If it is present all routing parameters are
-prohibited. If routing parameters are present this parameter is prohibited.
+A comma-separated list of collections to be aliased.
+The collections must already exist in the cluster.
+This parameter signals the creation of a standard alias.
+If it is present all routing parameters are prohibited.
+If routing parameters are present this parameter is prohibited.
 
 ==== Routed Alias Parameters
 
 Most routed alias parameters become _alias properties_ that can subsequently be inspected and <<aliasprop,modified>>.
 
 `router.name`::
-The type of routing to use. Presently only `time` and `category` and `Dimensional[]` are valid.
-In the case of a multi dimensional routed alias (A. K. A. "DRA", see <<aliases.adoc#dimensional-routed-aliases,Aliases>>
-documentation), it is required to express all the dimensions in the same order that they will appear in the dimension
-array. The format for a DRA router.name is Dimensional[dim1,dim2] where dim1 and dim2 are valid router.name
-values for each sub-dimension. Note that DRA's are very new, and only 2D DRA's are presently supported. Higher
-numbers of dimensions will be supported soon. See examples below for further clarification on how to configure
-individual dimensions. This parameter is required.
+The type of routing to use.
+Presently only `time` and `category` and `Dimensional[]` are valid.
+In the case of a multi dimensional routed alias (aka "DRA", see <<aliases.adoc#dimensional-routed-aliases,Aliases>>), it is required to express all the dimensions in the same order that they will appear in the dimension
+array.
+The format for a DRA router.name is Dimensional[dim1,dim2] where dim1 and dim2 are valid router.name values for each sub-dimension.
+Note that DRA's are very new, and only 2D DRA's are presently supported.
+Higher numbers of dimensions will be supported soon.
+See examples below for further clarification on how to configure
+individual dimensions.
+This parameter is required.
 
 `router.field`::
 The field to inspect to determine which underlying collection an incoming document should be routed to.
 This field is required on all incoming documents.
 
 `create-collection.*`::
-The `*` wildcard can be replaced with any parameter from the <<collection-management.adoc#create,CREATE>> command except `name`. All other fields
-are identical in requirements and naming except that we insist that the configset be explicitly specified.
+The `*` wildcard can be replaced with any parameter from the <<collection-management.adoc#create,CREATE>> command except `name`.
+All other fields are identical in requirements and naming except that we insist that the configset be explicitly specified.
 The configset must be created beforehand, either uploaded or copied and modified.
 It's probably a bad idea to use "data driven" mode as schema mutations might happen concurrently leading to errors.
 
 ==== Time Routed Alias Parameters
 
 `router.start`::
-The start date/time of data for this time routed alias in Solr's standard date/time format (i.e., ISO-8601 or "NOW"
-optionally with <<working-with-dates.adoc#date-math,date math>>).
+The start date/time of data for this time routed alias in Solr's standard date/time format (i.e., ISO-8601 or "NOW" optionally with <<date-formatting-math.adoc#date-math,date math>>).
 +
 The first collection created for the alias will be internally named after this value.
-If a document is submitted with an earlier value for router.field then the earliest collection the alias points to then
-it will yield an error since it can't be routed. This date/time MUST NOT have a milliseconds component other than 0.
-Particularly, this means `NOW` will fail 999 times out of 1000, though `NOW/SECOND`, `NOW/MINUTE`, etc. will work
-just fine. This parameter is required.
+If a document is submitted with an earlier value for `router.field` than the earliest collection the alias points to, it will yield an error since it can't be routed.
+This date/time MUST NOT have a milliseconds component other than 0.
+Particularly, this means `NOW` will fail 999 times out of 1000, though `NOW/SECOND`, `NOW/MINUTE`, etc., will work just fine.
+This parameter is required.
 
 `TZ`::
-The timezone to be used when evaluating any date math in router.start or router.interval. This is equivalent to the
-same parameter supplied to search queries, but understand in this case it's persisted with most of the other parameters
+The timezone to be used when evaluating any date math in router.start or router.interval.
+This is equivalent to the same parameter supplied to search queries, but understand in this case it's persisted with most of the other parameters
 as an alias property.
 +
-If GMT-4 is supplied for this value then a document dated 2018-01-14T21:00:00:01.2345Z would be stored in the
-myAlias_2018-01-15_01 collection (assuming an interval of +1HOUR).
+If GMT-4 is supplied for this value, then a document dated 2018-01-14T21:00:01.2345Z would be stored in the myAlias_2018-01-15_01 collection (assuming an interval of +1HOUR).
 +
 The default timezone is UTC.
 
 `router.interval`::
 A date math expression that will be appended to a timestamp to determine the next collection in the series.
-Any date math expression that can be evaluated if appended to a timestamp of the form 2018-01-15T16:17:18 will
-work here.
+Any date math expression that can be evaluated if appended to a timestamp of the form 2018-01-15T16:17:18 will work here.
 +
 This parameter is required.
 
 `router.maxFutureMs`::
-The maximum milliseconds into the future that a document is allowed to have in `router.field` for it to be accepted
-without error.  If there was no limit, than an erroneous value could trigger many collections to be created.
+The maximum milliseconds into the future that a document is allowed to have in `router.field` for it to be accepted without error.
+If there were no limit, then an erroneous value could trigger many collections to be created.
 +
-The default is 10 minutes.
+The default is `600000` milliseconds, or 10 minutes.
 
 `router.preemptiveCreateMath`::
 A date math expression that results in early creation of new collections.
 +
-If a document arrives with a timestamp that is after the end time of the most recent collection minus this
-interval, then the next (and only the next) collection will be created asynchronously. Without this setting, collections are created
-synchronously when required by the document time stamp and thus block the flow of documents until the collection
-is created (possibly several seconds). Preemptive creation reduces these hiccups. If set to enough time (perhaps
-an hour or more) then if there are problems creating a collection, this window of time might be enough to take
-corrective action. However after a successful preemptive creation,  the collection is consuming resources without
-being used, and new documents will tend to be routed through it only to be routed elsewhere. Also, note that
-`router.autoDeleteAge` is currently evaluated relative to the date of a newly created collection, and so you may
-want to increase the delete age by the preemptive window amount so that the oldest collection isn't deleted too
-soon. Note that it has to be possible to subtract the interval specified from a date, so if prepending a
-minus sign creates invalid date math, this will cause an error. Also note that a document that is itself
-destined for a collection that does not exist will still trigger synchronous creation up to that destination collection
-but will not trigger additional async preemptive creation. Only one type of collection creation can happen
-per document.
+If a document arrives with a timestamp that is after the end time of the most recent collection minus this interval, then the next (and only the next) collection will be created asynchronously.
+Without this setting, collections are created synchronously when required by the document time stamp and thus block the flow of documents until the collection is created (possibly several seconds).
+Preemptive creation reduces these hiccups.
+If set to enough time (perhaps an hour or more), then if there are problems creating a collection, this window of time might be enough to take corrective action.
+However, after a successful preemptive creation, the collection is consuming resources without being used, and new documents will tend to be routed through it only to be routed elsewhere.
+Also, note that `router.autoDeleteAge` is currently evaluated relative to the date of a newly created collection, so you may want to increase the delete age by the preemptive window amount so that the oldest collection isn't deleted too soon.
+Note that it has to be possible to subtract the interval specified from a date, so if prepending a minus sign creates invalid date math, this will cause an error.
+Also note that a document that is itself destined for a collection that does not exist will still trigger synchronous creation up to that destination collection but will not trigger additional async preemptive creation.
+Only one type of collection creation can happen per document.
 Example: `90MINUTES`.
 +
 This property is blank by default indicating just-in-time, synchronous creation of new collections.
@@ -173,8 +171,7 @@ This property is blank by default indicating just-in-time, synchronous creation
 `router.autoDeleteAge`::
 A date math expression that results in the oldest collections getting deleted automatically.
 +
-The date math is relative to the timestamp of a newly created collection (typically close to the current time),
-and thus this must produce an earlier time via rounding and/or subtracting.
+The date math is relative to the timestamp of a newly created collection (typically close to the current time), and thus this must produce an earlier time via rounding and/or subtracting.
 Collections to be deleted must have a time range that is entirely before the computed age.
 Collections are considered for deletion immediately prior to new collections getting created.
 Example: `/DAY-90DAYS`.
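+
+As an illustration of how these parameters fit together, here is a hedged sketch of a V1 CREATEALIAS request for a time routed alias (the alias name, field name, and configset name are hypothetical, and the exact set of optional parameters you need may differ):
+
+[source,bash]
+----
+# %2B is a URL-encoded "+" in the interval expression
+curl "http://localhost:8983/solr/admin/collections?action=CREATEALIAS&name=timedata&router.name=time&router.field=evt_dt&router.start=NOW/DAY&router.interval=%2B1DAY&router.maxFutureMs=3600000&router.autoDeleteAge=/DAY-90DAYS&create-collection.collection.configName=myConfig&create-collection.numShards=2"
+----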
@@ -188,15 +185,14 @@ The maximum number of categories allowed for this alias.
 This setting safeguards against the inadvertent creation of an infinite number of collections in the event of bad data.
 
 `router.mustMatch`::
-A regular expression that the value of the field specified by `router.field` must match before a corresponding
-collection will be created. Note that changing this setting after data has been added will not alter the data already
-indexed. Any valid Java regular expression pattern may be specified. This expression is pre-compiled at the start of
-each request so batching of updates is strongly recommended. Overly complex patterns will produce cpu
-or garbage collecting overhead during indexing as determined by the JVM's implementation of regular expressions.
+A regular expression that the value of the field specified by `router.field` must match before a corresponding collection will be created.
+Note that changing this setting after data has been added will not alter the data already indexed.
+Any valid Java regular expression pattern may be specified.
+This expression is pre-compiled at the start of each request so batching of updates is strongly recommended.
+Overly complex patterns will produce CPU or garbage collection overhead during indexing as determined by the JVM's implementation of regular expressions.
 
 ==== Dimensional Routed Alias Parameters
 
-
 `router.#.`::
 This prefix denotes which position in the dimension array is being referred to for purposes of dimension configuration.
 For example in a Dimensional[time,category] router.0.start would be used to set the start time for the time dimension.
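+
+As a hedged illustration (all names are hypothetical, and the per-dimension `router.#.field` usage is an assumption based on the prefix rule above), a V1 CREATEALIAS request for a `Dimensional[time,category]` alias might look like this:
+
+[source,bash]
+----
+# %5B and %5D are URL-encoded forms of [ and ]; %2B is a URL-encoded "+"
+curl "http://localhost:8983/solr/admin/collections?action=CREATEALIAS&name=dimensional&router.name=Dimensional%5Btime,category%5D&router.0.field=evt_dt&router.0.start=2021-01-01T00:00:00Z&router.0.interval=%2B1MONTH&router.1.field=category_s&router.1.maxCardinality=20&create-collection.collection.configName=myConfig&create-collection.numShards=2"
+----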
@@ -205,8 +201,8 @@ For example in a Dimensional[time,category] router.0.start would be used to set
 === CREATEALIAS Response
 
 The output will simply be a responseHeader with details of the time it took to process the request.
-To confirm the creation of the alias, you can look in the Solr Admin UI, under the Cloud section and find the
-`aliases.json` file. The initial collection for routed aliases should also be visible in various parts of the admin UI.
+To confirm the creation of the alias, you can look in the Solr Admin UI, under the Cloud section and find the `aliases.json` file.
+The initial collection for routed aliases should also be visible in various parts of the admin UI.
 
 === Examples using CREATEALIAS
 Create an alias named "testalias" and link it to the collections named "foo" and "bar".
@@ -522,7 +518,8 @@ http://localhost:8983/solr/admin/collections?action=LISTALIASES&wt=xml
 [[aliasprop]]
 == ALIASPROP: Modify Alias Properties for a Collection
 
-The `ALIASPROP` action modifies the properties (metadata) on an alias. If a key is set with a value that is empty it will be removed.
+The `ALIASPROP` action modifies the properties (metadata) on an alias.
+If a key is set with a value that is empty it will be removed.
 
 [.dynamic-tabs]
 --
@@ -556,14 +553,15 @@ curl -X POST http://localhost:8983/api/collections -H 'Content-Type: application
 --
 
 
-WARNING: This command allows you to revise any property. No alias specific validation is performed.
-         Routed aliases may cease to function, function incorrectly or cause errors if property values
-         are set carelessly.
+WARNING: This command allows you to revise any property.
+No alias-specific validation is performed.
+Routed aliases may cease to function, function incorrectly, or cause errors if property values are set carelessly.
 
 === ALIASPROP Parameters
 
 `name`::
-The alias name on which to set properties. This parameter is required.
+The alias name on which to set properties.
+This parameter is required.
 
 `property._name_=_value_` (v1)::
 Set property _name_ to _value_.
@@ -577,8 +575,7 @@ Request ID to track this action which will be <<collections-api.adoc#asynchronou
 === ALIASPROP Response
 
 The output will simply be a responseHeader with details of the time it took to process the request.
-To confirm the creation of the property or properties, you can look in the Solr Admin UI, under the Cloud section and
-find the `aliases.json` file or use the LISTALIASES api command.
+To confirm the creation of the property or properties, you can look in the Solr Admin UI, under the Cloud section and find the `aliases.json` file, or use the LISTALIASES API command.
 
 [[deletealias]]
 == DELETEALIAS: Delete a Collection Alias
@@ -616,7 +613,8 @@ curl -X POST http://localhost:8983/api/collections -H 'Content-Type: application
 === DELETEALIAS Parameters
 
 `name`::
-The name of the alias to delete. This parameter is required.
+The name of the alias to delete.
+This parameter is required.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
@@ -624,5 +622,4 @@ Request ID to track this action which will be <<collections-api.adoc#asynchronou
 === DELETEALIAS Response
 
 The output will simply be a responseHeader with details of the time it took to process the request.
-To confirm the removal of the alias, you can look in the Solr Admin UI, under the Cloud section, and
-find the `aliases.json` file.
+To confirm the removal of the alias, you can look in the Solr Admin UI, under the Cloud section, and find the `aliases.json` file.
diff --git a/solr/solr-ref-guide/src/aliases.adoc b/solr/solr-ref-guide/src/aliases.adoc
index 57cdf6e..e30d712 100644
--- a/solr/solr-ref-guide/src/aliases.adoc
+++ b/solr/solr-ref-guide/src/aliases.adoc
@@ -16,34 +16,31 @@
 // specific language governing permissions and limitations
 // under the License.
 
-
-SolrCloud has the ability to query one or more collections via an alternative name. These
-alternative names for collections are known as aliases, and are useful when you want to:
+SolrCloud has the ability to query one or more collections via an alternative name.
+These alternative names for collections are known as aliases, and are useful when you want to:
 
 . Atomically switch to using a newly (re)indexed collection with zero down time (by re-defining the alias)
 . Insulate the client programming versus changes in collection names
 . Issue a single query against several collections with identical schemas
 
-There are two types of aliases: standard aliases and routed aliases. Within routed aliases, there are two types: category-routed aliases and time-routed aliases. These types are discussed in this section.
+There are two types of aliases: standard aliases and routed aliases.
+Within routed aliases, there are two types: category-routed aliases and time-routed aliases.
+These types are discussed in this section.
 
-It's possible to send collection update commands to aliases, but only to those that either resolve to a single collection
-or those that define the routing between multiple collections (<<Routed Aliases>>). In other cases update commands are
-rejected with an error since there is no logic by which to distribute documents among the multiple collections.
+It's possible to send collection update commands to aliases, but only to those that either resolve to a single collection or those that define the routing between multiple collections (<<Routed Aliases>>).
+In other cases update commands are rejected with an error since there is no logic by which to distribute documents among the multiple collections.
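+
+For example, assuming an alias named `myAlias` that resolves to a single collection (or that defines routing), documents can be indexed through the alias exactly as if it were a collection (a hedged sketch; the alias name and document fields are hypothetical):
+
+[source,bash]
+----
+curl -X POST "http://localhost:8983/solr/myAlias/update?commit=true" \
+  -H 'Content-Type: application/json' \
+  -d '[{"id": "1", "title_s": "indexed via an alias"}]'
+----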
 
 == Standard Aliases
 
-Standard aliases are created and updated using the <<collection-aliasing.adoc#createalias,CREATEALIAS>> command.
+Standard aliases are created and updated using the <<alias-management.adoc#createalias,CREATEALIAS>> command.
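+
+For example, the zero-downtime switch described above can be done by simply re-defining the alias once the new collection is ready (a hedged sketch with hypothetical names):
+
+[source,bash]
+----
+# point the "products" alias at the freshly built collection
+curl "http://localhost:8983/solr/admin/collections?action=CREATEALIAS&name=products&collections=products_v2"
+----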
 
-The current list of collections that are members of an alias can be verified via the
-  <<cluster-node-management.adoc#clusterstatus,CLUSTERSTATUS>> command.
+The current list of collections that are members of an alias can be verified via the <<cluster-node-management.adoc#clusterstatus,CLUSTERSTATUS>> command.
 
-The full definition of all aliases including metadata about that alias (in the case of routed aliases, see below)
-  can be verified via the <<collection-aliasing.adoc#listaliases,LISTALIASES>> command.
+The full definition of all aliases including metadata about that alias (in the case of routed aliases, see below) can be verified via the <<alias-management.adoc#listaliases,LISTALIASES>> command.
 
-Alternatively this information is available by checking `/aliases.json` in ZooKeeper with either the native ZooKeeper
-  client or in the <<cloud-screens.adoc#tree-view,tree page>> of the cloud menu in the admin UI.
+Alternatively, this information is available by checking `/aliases.json` in ZooKeeper, either with the native ZooKeeper client or in the <<cloud-screens.adoc#tree-view,tree page>> of the cloud menu in the admin UI.
 
-Aliases may be deleted via the <<collection-aliasing.adoc#deletealias,DELETEALIAS>> command.
+Aliases may be deleted via the <<alias-management.adoc#deletealias,DELETEALIAS>> command.
 When deleting an alias, underlying collections are *unaffected*.
 
 [TIP]
@@ -51,154 +48,122 @@ When deleting an alias, underlying collections are *unaffected*.
 Any alias (standard or routed) that references multiple collections may complicate relevancy.
 By default, SolrCloud scores documents on a per-shard basis.
 
-With multiple collections in an alias this is always a problem, so if you have a use case for which BM25 or
-  TF/IDF relevancy is important you will want to turn on one of the
-  <<distributed-requests.adoc#distributedidf,ExactStatsCache>> implementations.
+With multiple collections in an alias this is always a problem, so if you have a use case for which BM25 or TF/IDF relevancy is important, you will want to turn on one of the <<solrcloud-distributed-requests.adoc#distributedidf,ExactStatsCache>> implementations.
 
-However, for analytical use cases where results are sorted on numeric, date, or alphanumeric field values, rather
-  than relevancy calculations, this is not a problem.
+However, for analytical use cases where results are sorted on numeric, date, or alphanumeric field values, rather than relevancy calculations, this is not a problem.
 ====
 
 == Routed Aliases
 
-To address the update limitations associated with standard aliases and provide additional useful features, the concept of
-  routed aliases has been developed.
-There are presently two types of routed alias: time routed and category routed. These are described in detail below,
-  but share some common behavior.
+To address the update limitations associated with standard aliases and provide additional useful features, the concept of routed aliases has been developed.
+There are presently two types of routed alias: time routed and category routed.
+These are described in detail below, but share some common behavior.
 
-When processing an update for a routed alias, Solr initializes its
-  <<update-request-processors.adoc#,UpdateRequestProcessor>> chain as usual, but
-  when `DistributedUpdateProcessor` (DUP) initializes, it detects that the update targets a routed alias and injects
-  `RoutedAliasUpdateProcessor` (RAUP) in front of itself.
-RAUP, in coordination with the Overseer, is the main part of a routed alias, and must immediately precede DUP. It is not
-  possible to configure custom chains with other types of UpdateRequestProcessors between RAUP and DUP.
+When processing an update for a routed alias, Solr initializes its <<update-request-processors.adoc#,UpdateRequestProcessor>> chain as usual, but when `DistributedUpdateProcessor` (DUP) initializes, it detects that the update targets a routed alias and injects `RoutedAliasUpdateProcessor` (RAUP) in front of itself.
+RAUP, in coordination with the Overseer, is the main part of a routed alias, and must immediately precede DUP.
+It is not possible to configure custom chains with other types of UpdateRequestProcessors between RAUP and DUP.
 
-Ideally, as a user of a routed alias, you needn't concern yourself with the particulars of the collection naming pattern
-  since both queries and updates may be done via the alias.
+Ideally, as a user of a routed alias, you needn't concern yourself with the particulars of the collection naming pattern since both queries and updates may be done via the alias.
 When adding data, you should usually direct documents to the alias (e.g., reference the alias name instead of any collection).
 The Solr server and `CloudSolrClient` will direct an update request to the first collection that an alias points to.
 Once the server receives the data it will perform the necessary routing.
 
-WARNING: It's extremely important with all routed aliases that the route values NOT change. Reindexing a document
-with a different route value for the same ID produces two distinct documents with the same ID accessible via the alias.
+WARNING: It's extremely important with all routed aliases that the route values NOT change.
+Reindexing a document with a different route value for the same ID produces two distinct documents with the same ID accessible via the alias.
 All query time behavior of the routed alias is *_undefined_* and not easily predictable once duplicate ID's exist.
 
-CAUTION: It is a bad idea to use "data driven" mode (aka <<schemaless-mode.adoc#,schemaless-mode>>) with
-routed aliases, as duplicate schema mutations might happen concurrently leading to errors.
+CAUTION: It is a bad idea to use "data driven" mode (aka <<schemaless-mode.adoc#,schemaless-mode>>) with routed aliases, as duplicate schema mutations might happen concurrently leading to errors.
 
 
 === Time Routed Aliases
 
-Time Routed Aliases (TRAs) are a SolrCloud feature that manages an alias and a time sequential
- series of collections.
+Time Routed Aliases (TRAs) are a SolrCloud feature that manages an alias and a time-sequential series of collections.
 
-It automatically creates new collections and (optionally) deletes old ones as it routes documents to the correct
-  collection based on its timestamp.
-This approach allows for indefinite indexing of data without degradation of performance otherwise experienced due to the
-  continuous growth of a single index.
+It automatically creates new collections and (optionally) deletes old ones as it routes documents to the correct collection based on each document's timestamp.
+This approach allows for indefinite indexing of data without degradation of performance otherwise experienced due to the continuous growth of a single index.
 
-If you need to store a lot of timestamped data in Solr, such as logs or IoT sensor data, then this feature probably
-  makes more sense than creating one sharded hash-routed collection.
+If you need to store a lot of timestamped data in Solr, such as logs or IoT sensor data, then this feature probably makes more sense than creating one sharded hash-routed collection.
 
 ==== How It Works
 
-First you create a time routed aliases using the <<collection-aliasing.adoc#createalias,CREATEALIAS>> command with the
-desired router settings.
-Most of the settings are editable at a later time using the <<collection-aliasing.adoc#aliasprop,ALIASPROP>> command.
+First you create a time routed alias using the <<alias-management.adoc#createalias,CREATEALIAS>> command with the desired router settings.
+Most of the settings are editable at a later time using the <<alias-management.adoc#aliasprop,ALIASPROP>> command.
 
 The first collection will be created automatically, along with an alias pointing to it.
 Each underlying Solr "core" in a collection that is a member of a TRA has a special core property referencing the alias.
-The name of each collection is comprised of the TRA name and the start timestamp (UTC), with trailing zeros and symbols
-  truncated.
-
-The collections list for a TRA is always reverse sorted, and thus the connection path of the request will route to the
-  lead collection. Using `CloudSolrClient` is preferable as it can reduce the number of underlying physical HTTP requests by one.
-If you know that a particular set of documents to be delivered is going to a particular older collection then you could
-  direct it there from the client side as an optimization but it's not necessary. `CloudSolrClient` does not (yet) do this.
-
-RAUP first reads TRA configuration from the alias properties when it is initialized. As it sees each document, it checks for
-  changes to TRA properties, updates its cached configuration if needed, and then determines which collection the
-  document belongs to:
-
-* If RAUP needs to send it to a time segment represented by a collection other than the one that
-  the client chose to communicate with, then it will do so using mechanisms shared with DUP.
-  Once the document is forwarded to the correct collection (i.e., the correct TRA time segment), it skips directly to
-  DUP on the target collection and continues normally, potentially being routed again to the correct shard & replica
-  within the target collection.
-
-* If it belongs in the current collection (which is usually the case if processing events as they occur), the document
-  passes through to DUP. DUP does its normal collection-level processing that may involve routing the document
-  to another shard & replica.
-
-* If the timestamp on the document is more recent than the most recent TRA segment, then a new collection needs to be
-  added at the front of the TRA.
-  RAUP will create this collection, add it to the alias, and then forward the document to the collection it just created.
-  This can happen recursively if more than one collection needs to be created.
+The name of each collection is composed of the TRA name and the start timestamp (UTC), with trailing zeros and symbols truncated.
+
+The collections list for a TRA is always reverse sorted, and thus the connection path of the request will route to the lead collection.
+Using `CloudSolrClient` is preferable as it can reduce the number of underlying physical HTTP requests by one.
+If you know that a particular set of documents to be delivered is going to a particular older collection, then you could direct it there from the client side as an optimization, but it's not necessary.
+`CloudSolrClient` does not (yet) do this.
+
+RAUP first reads TRA configuration from the alias properties when it is initialized.
+As it sees each document, it checks for changes to TRA properties, updates its cached configuration if needed, and then determines which collection the document belongs to:
+
+* If RAUP needs to send it to a time segment represented by a collection other than the one that the client chose to communicate with, then it will do so using mechanisms shared with DUP.
+Once the document is forwarded to the correct collection (i.e., the correct TRA time segment), it skips directly to DUP on the target collection and continues normally, potentially being routed again to the correct shard & replica within the target collection.
+
+* If it belongs in the current collection (which is usually the case if processing events as they occur), the document passes through to DUP.
+DUP does its normal collection-level processing that may involve routing the document to another shard & replica.
+
+* If the timestamp on the document is more recent than the most recent TRA segment, then a new collection needs to be added at the front of the TRA.
+RAUP will create this collection, add it to the alias, and then forward the document to the collection it just created.
+This can happen recursively if more than one collection needs to be created.
 +
-Each time a new collection is added, the oldest collections in the TRA are examined for possible deletion, if that has
-    been configured.
+Each time a new collection is added, the oldest collections in the TRA are examined for possible deletion, if that has been configured.
 All this happens synchronously, potentially adding seconds to the update request and indexing latency.
 +
-If `router.preemptiveCreateMath` is configured and if the document arrives within this window then it will occur
-asynchronously. See <<collection-aliasing.adoc#time-routed-alias-parameters,Time Routed Alias Parameters>> for more information.
+If `router.preemptiveCreateMath` is configured and the document arrives within this window, then collection creation will occur asynchronously.
+See <<alias-management.adoc#time-routed-alias-parameters,Time Routed Alias Parameters>> for more information.
 
 Any other type of update like a commit or delete is routed by RAUP to all collections.
-Generally speaking, this is not a performance concern. When Solr receives a delete or commit wherein nothing is deleted
-or nothing needs to be committed, then it's pretty cheap.
+Generally speaking, this is not a performance concern.
+When Solr receives a delete or commit wherein nothing is deleted or nothing needs to be committed, then it's pretty cheap.
 
 ==== Limitations & Assumptions
 
-* Only *time* routed aliases are supported. If you instead have some other sequential number, you could fake it
-  as a time (e.g., convert to a timestamp assuming some epoch and increment).
+* Only *time* routed aliases are supported.
+If you instead have some other sequential number, you could fake it as a time (e.g., convert to a timestamp assuming some epoch and increment).
 +
 The smallest possible interval is one second.
-No other routing scheme is supported, although this feature was developed with considerations that it could be
-  extended/improved to other schemes.
+No other routing scheme is supported, although this feature was developed with considerations that it could be extended/improved to other schemes.
 
-* The underlying collections form a contiguous sequence without gaps. This will not be suitable when there are
-  large gaps in the underlying data, as Solr will insist that there be a collection for each increment. This
-  is due in part to Solr calculating the end time of each interval collection based on the timestamp of
-  the next collection, since it is otherwise not stored in any way.
+* The underlying collections form a contiguous sequence without gaps.
+This will not be suitable when there are large gaps in the underlying data, as Solr will insist that there be a collection for each increment.
+This is due in part to Solr calculating the end time of each interval collection based on the timestamp of the next collection, since it is otherwise not stored in any way.
 
-* Avoid sending updates to the oldest collection if you have also configured that old collections should be
-  automatically deleted. It could lead to exceptions bubbling back to the indexing client.
+* Avoid sending updates to the oldest collection if you have also configured that old collections should be automatically deleted.
+It could lead to exceptions bubbling back to the indexing client.
 
 === Category Routed Aliases
 
-Category Routed Aliases (CRAs) are a feature to manage aliases and a set of dependent collections
-based on the value of a single field.
+Category Routed Aliases (CRAs) are a feature to manage aliases and a set of dependent collections based on the value of a single field.
 
-CRAs automatically create new collections but because the partitioning is on categorical information rather than continuous
-numerically based values there's no logic for automatic deletion. This approach allows for simplified indexing of data
-that must be segregated into collections for cluster management or security reasons.
+CRAs automatically create new collections, but because the partitioning is on categorical information rather than continuous numeric values, there is no logic for automatic deletion.
+This approach allows for simplified indexing of data that must be segregated into collections for cluster management or security reasons.
 
 ==== How It Works
 
-First you create a category routed alias using the <<collection-aliasing.adoc#createalias,CREATEALIAS>> command with the
-desired router settings.
- Most of the settings are editable at a later time using the <<collection-aliasing.adoc#aliasprop,ALIASPROP>> command.
+First you create a category routed alias using the <<alias-management.adoc#createalias,CREATEALIAS>> command with the desired router settings.
+Most of the settings are editable at a later time using the <<alias-management.adoc#aliasprop,ALIASPROP>> command.
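+
+A hedged sketch of creating a CRA (the alias, field, and configset names are hypothetical; `router.maxCardinality` corresponds to the category-limit safeguard described below):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=CREATEALIAS&name=myAlias&router.name=category&router.field=category_s&router.maxCardinality=50&create-collection.collection.configName=myConfig&create-collection.numShards=1"
+----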
 
-The alias will be created with a special place-holder collection which will always be named
- `myAlias\__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA\__TEMP`. The first document indexed into the CRA
- will create a second collection named `myAlias__CRA__foo` (for a routed field value of `foo`). The second document
- indexed will cause the temporary place holder collection to be deleted. Thereafter collections will be created whenever
- a new value for the field is encountered.
+The alias will be created with a special place-holder collection which will always be named `myAlias\__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA\__TEMP`.
+The first document indexed into the CRA will create a second collection named `myAlias__CRA__foo` (for a routed field value of `foo`).
+The second document indexed will cause the temporary placeholder collection to be deleted.
+Thereafter collections will be created whenever a new value for the field is encountered.
 
-CAUTION: To guard against runaway collection creation options for limiting the total number of categories, and for
-rejecting values that don't match, a regular expression parameter is provided (see <<collection-aliasing.adoc#category-routed-alias-parameters,Category Routed Alias Parameters>> for
-details).
+CAUTION: To guard against runaway collection creation, an option for limiting the total number of categories and a regular expression parameter for rejecting values that don't match are provided (see <<alias-management.adoc#category-routed-alias-parameters,Category Routed Alias Parameters>> for details).
 +
-Note that by providing very large or very permissive values for these options you are accepting the risk that
-garbled data could potentially create thousands of collections and bring your cluster to a grinding halt.
+Note that by providing very large or very permissive values for these options, you are accepting the risk that garbled data could potentially create thousands of collections and bring your cluster to a grinding halt.
 
 Field values (and thus the collection names) are case sensitive.
 
-As elsewhere in Solr, manipulation and
-cleaning of the data is expected to be done by external processes before data is sent to Solr, with one exception.
-Throughout Solr there are limitations on the allowable characters in collection names. Any characters other than ASCII
-alphanumeric characters (`A-Za-z0-9`), hyphen (`-`) or underscore (`_`) are replaced with an underscore when calculating
-the collection name for a category. For a CRA named `myAlias` the following table shows how collection names would be
-calculated:
+As elsewhere in Solr, manipulation and cleaning of the data is expected to be done by external processes before data is sent to Solr, with one exception.
+Throughout Solr there are limitations on the allowable characters in collection names.
+Any characters other than ASCII alphanumeric characters (`A-Za-z0-9`), hyphen (`-`) or underscore (`_`) are replaced with an underscore when calculating the collection name for a category.
+For a CRA named `myAlias` the following table shows how collection names would be calculated:
 
 |===
 |Value |CRA Collection Name
@@ -226,78 +191,66 @@ calculated:
 
 |===
 
-Since collection creation can take upwards of 1-3 seconds, systems inserting data in a CRA should be
- constructed to handle such pauses whenever a new collection is created.
+Since collection creation can take upwards of 1-3 seconds, systems inserting data in a CRA should be constructed to handle such pauses whenever a new collection is created.
 Unlike time routed aliases, there is no way to predict the next value so such pauses are unavoidable.
 
-There is no automated means of removing a category. If a category needs to be removed from a CRA
-the following procedure is recommended:
+There is no automated means of removing a category.
+If a category needs to be removed from a CRA the following procedure is recommended:
 
 // TODO: This should have example instructions
-. Ensure that no documents with the value corresponding to the category to be removed will be sent
-   either by stopping indexing or by fixing the incoming data stream
+. Ensure that no documents with the value corresponding to the category to be removed will be sent, either by stopping indexing or by fixing the incoming data stream.
 . Modify the alias definition in ZooKeeper, removing the collection corresponding to the category.
-. Delete the collection corresponding to the category. Note that if the collection is not removed
-   from the alias first, this step will fail.
+. Delete the collection corresponding to the category (an example is sketched below).
+Note that if the collection is not removed from the alias first, this step will fail.
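+
+For the deletion step, a hedged sketch using the Collections API (the category value and resulting collection name are hypothetical):
+
+[source,bash]
+----
+# delete the now-unreferenced collection for the removed category
+curl "http://localhost:8983/solr/admin/collections?action=DELETE&name=myAlias__CRA__discontinued"
+----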
 
 ==== Limitations & Assumptions
 
 * CRAs are presently unsuitable for non-English data values due to the limits on collection names.
-  This can be worked around by duplicating the route value to a *_url safe_* Base64-encoded field
-  and routing on that value instead.
+This can be worked around by duplicating the route value to a *_url safe_* Base64-encoded field and routing on that value instead.
 
-* The check for the __CRA__ infix is independent of the regular expression validation and occurs after
-  the name of the collection to be created has been calculated. It may not be avoided and is necessary
-  to support future features.
+* The check for the __CRA__ infix is independent of the regular expression validation and occurs after the name of the collection to be created has been calculated.
+It may not be avoided and is necessary to support future features.
 
 === Dimensional Routed Aliases
 
-For cases where the desired segregation of of data relates to two fields and combination into a single
-field during indexing is impractical, or the TRA behavior is desired across multiple categories,
-Dimensional Routed aliases may be used. This feature has been designed to handle an arbitrary number
-and combination of category and time dimensions in any order, but users are cautioned to carefully
-consider the total number of collections that will result from such configurations. Collection counts
-in the high hundreds or low 1000's begin to pose significant challenges with ZooKeeper.
+For cases where the desired segregation of data relates to two fields and combination into a single field during indexing is impractical, or the TRA behavior is desired across multiple categories, Dimensional Routed Aliases may be used.
+This feature has been designed to handle an arbitrary number and combination of category and time dimensions in any order, but users are cautioned to carefully consider the total number of collections that will result from such configurations.
+Collection counts in the high hundreds or low thousands begin to pose significant challenges with ZooKeeper.
 
-NOTE: DRA's are a new feature and presently only 2 dimensions are supported. More dimensions will
-be supported in the future (see https://issues.apache.org/jira/browse/SOLR-13628 for progress)
+NOTE: DRAs are a new feature and presently only 2 dimensions are supported.
+More dimensions will be supported in the future (see https://issues.apache.org/jira/browse/SOLR-13628 for progress).
 
 ==== How It Works
 
-First you create a dimensional routed alias with the desired router settings for each dimension. See the
-<<collection-aliasing.adoc#createalias,CREATEALIAS>> command documentation for details on how to specify the
-per-dimension configuration. Typical collection names will be of the form (example is for category x time example,
-with 30 minute intervals):
+First you create a dimensional routed alias with the desired router settings for each dimension.
+See the <<alias-management.adoc#createalias,CREATEALIAS>> command documentation for details on how to specify the per-dimension configuration.
+Typical collection names will be of the following form (this example is for a category x time DRA, with 30-minute intervals):
 
-    myalias__CRA__someCategory__TRA__2019-07-01_00_30
+[source,text]
+myalias__CRA__someCategory__TRA__2019-07-01_00_30
 
 Note that the initial collection will be a throw away place holder for any DRA containing a category based dimension.
-Name generation for each sub-part of a collection name is identical to the corresponding potion of the component
-dimension type. (e.g., a category value generating __CRA__ or __TRA__ would still produce an error)
-
-WARNING: The prior warning about reindexing documents with different route value applies to every dimension of
-a DRA. DRA's are inappropriate for documents where categories or timestamps used in routing will change (this of
-course applies to other route values in future RA types too).
-
-As with all Routed Aliases, DRA's impose some costs if your data is not well behaved. In addition to the
-normal caveats of each component dimension there is a need for care in sending new categories after the DRA has been
-running for a while. Ordered Dimensions (time) behave slightly differently from Unordered (category) dimensions.
-Ordered dimensions rely on the iteration order of the collections in the alias and therefore cannot tolerate the
-generation of collection names out of order. The this means that of this is that when an ordered dimension such as time
-is a component of a DRA and the DRA experiences receipt of a document with a novel category with a time value
-corresponding to a time slice other than the starting time-slice for the time dimension, several collections will
-need to be created before the document can be indexed. This "new category effect" is identical to the behavior
-you would get with a TRA if you picked a start-date too far in the past.
-
-For example given a Dimensional[time,category]  DRA with start time of 2019-07-01T00:00:00Z the pattern of collections
-created for 4 documents might look like this:
+Name generation for each sub-part of a collection name is identical to the corresponding portion of the component dimension type (e.g., a category value generating __CRA__ or __TRA__ would still produce an error).
+
+WARNING: The prior warning about reindexing documents with a different route value applies to every dimension of a DRA.
+DRAs are inappropriate for documents where categories or timestamps used in routing will change (this of course applies to other route values in future RA types too).
+
+As with all Routed Aliases, DRA's impose some costs if your data is not well behaved.
+In addition to the normal caveats of each component dimension there is a need for care in sending new categories after the DRA has been running for a while.
+Ordered Dimensions (time) behave slightly differently from Unordered (category) dimensions.
+Ordered dimensions rely on the iteration order of the collections in the alias and therefore cannot tolerate the generation of collection names out of order.
+This means that when an ordered dimension such as time is a component of a DRA, and the DRA receives a document with a novel category and a time value corresponding to a time-slice other than the starting time-slice for the time dimension, several collections will need to be created before the document can be indexed.
+This "new category effect" is identical to the behavior you would get with a TRA if you picked a start-date too far in the past.
+
+For example, given a Dimensional[time,category] DRA with a start time of 2019-07-01T00:00:00Z, the pattern of collections created for 4 documents might look like this:
 
 *No documents*
 
 *Aliased collections:*
 
-    // temp avoids empty alias error conditions
-    myalias__TRA__2019-07-01__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA_TEMP
+[source,text]
+// temp avoids empty alias error conditions
+myalias__TRA__2019-07-01__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA_TEMP
 
 *Doc 1*
 
@@ -306,9 +259,10 @@ created for 4 documents might look like this:
 
 *Aliased collections:*
 
-    // temp retained to avoid empty alias during race with collection creation
-    myalias__TRA__2019-07-01__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA_TEMP
-    myalias__TRA__2019-07-01__CRA__someCategory
+[source,text]
+// temp retained to avoid empty alias during race with collection creation
+myalias__TRA__2019-07-01__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA_TEMP
+myalias__TRA__2019-07-01__CRA__someCategory
 
 *Doc 2*
 
@@ -317,10 +271,11 @@ created for 4 documents might look like this:
 
 *Aliased collections:*
 
-    // temp can now be deleted without risk of having an empty alias.
-    myalias__TRA__2019-07-01__CRA__someCategory
-    myalias__TRA__2019-07-01__CRA__otherCategory // 2 collections created in one update
-    myalias__TRA__2019-07-02__CRA__otherCategory
+[source,text]
+// temp can now be deleted without risk of having an empty alias.
+myalias__TRA__2019-07-01__CRA__someCategory
+myalias__TRA__2019-07-01__CRA__otherCategory // 2 collections created in one update
+myalias__TRA__2019-07-02__CRA__otherCategory
 
 *Doc 3*
 
@@ -329,12 +284,13 @@ created for 4 documents might look like this:
 
 *Aliased collections:*
 
-    myalias__TRA__2019-07-01__CRA__someCategory
-    myalias__TRA__2019-07-01__CRA__otherCategory
-    myalias__TRA__2019-07-02__CRA__otherCategory
-    myalias__TRA__2019-07-01__CRA__thirdCategory // 3 collections created in one update!
-    myalias__TRA__2019-07-02__CRA__thirdCategory
-    myalias__TRA__2019-07-03__CRA__thirdCategory
+[source,text]
+myalias__TRA__2019-07-01__CRA__someCategory
+myalias__TRA__2019-07-01__CRA__otherCategory
+myalias__TRA__2019-07-02__CRA__otherCategory
+myalias__TRA__2019-07-01__CRA__thirdCategory // 3 collections created in one update!
+myalias__TRA__2019-07-02__CRA__thirdCategory
+myalias__TRA__2019-07-03__CRA__thirdCategory
 
 *Doc 4*
 
@@ -343,26 +299,22 @@ created for 4 documents might look like this:
 
 *Aliased collections:*
 
-    myalias__TRA__2019-07-01__CRA__someCategory
-    myalias__TRA__2019-07-01__CRA__otherCategory
-    myalias__TRA__2019-07-02__CRA__otherCategory
-    myalias__TRA__2019-07-01__CRA__thirdCategory
-    myalias__TRA__2019-07-02__CRA__thirdCategory
-    myalias__TRA__2019-07-03__CRA__thirdCategory
-    myalias__TRA__2019-07-02__CRA__someCategory // 2 collections created in one update
-    myalias__TRA__2019-07-03__CRA__someCategory
+[source,text]
+myalias__TRA__2019-07-01__CRA__someCategory
+myalias__TRA__2019-07-01__CRA__otherCategory
+myalias__TRA__2019-07-02__CRA__otherCategory
+myalias__TRA__2019-07-01__CRA__thirdCategory
+myalias__TRA__2019-07-02__CRA__thirdCategory
+myalias__TRA__2019-07-03__CRA__thirdCategory
+myalias__TRA__2019-07-02__CRA__someCategory // 2 collections created in one update
+myalias__TRA__2019-07-03__CRA__someCategory
 
-Therefore the sweet spot for DRA's is for a data set with a well standardized set of dimensions that are not changing
-and where the full set of permutations occur regularly. If a new category is introduced at a later date and
-indexing latency is an important SLA feature, there are a couple strategies to mitigate this effect:
+Therefore the sweet spot for DRAs is a data set with a well-standardized set of dimensions that are not changing and where the full set of permutations occurs regularly.
+If a new category is introduced at a later date and indexing latency is an important SLA feature, there are a couple of strategies to mitigate this effect:
 
-* If the number of extra time slices to be created is not very large, then sending a single document out of band from
-  regular indexing, and waiting for collection creation to complete before allowing the new category to be sent via the
-  SLA constrained process.
+* If the number of extra time slices to be created is not very large, send a single document out of band from regular indexing, and wait for collection creation to complete before allowing the new category to be sent via the SLA-constrained process.
 
-* If the above procedure is likely to create an extreme number of collections, and the earliest possible document in
-  the new category is known, the start time for the time dimension may be adjusted using the
-  <<collection-aliasing.adoc#aliasprop,ALIASPROP>> command
+* If the above procedure is likely to create an extreme number of collections, and the earliest possible document in the new category is known, the start time for the time dimension may be adjusted using the <<alias-management.adoc#aliasprop,ALIASPROP>> command.
 
 === Improvement Possibilities
 
@@ -371,8 +323,7 @@ Some _potential_ areas for improvement that _are not implemented yet_ are:
 
 * *TRAs*: Searches with time filters should only go to applicable collections.
 
-* *TRAs*: Ways to automatically optimize (or reduce the resources of) older collections that aren't expected to receive more
-  updates, and might have less search demand.
+* *TRAs*: Ways to automatically optimize (or reduce the resources of) older collections that aren't expected to receive more updates, and might have less search demand.
 
 * *CRAs*: Intrinsic support for non-English text via Base64 encoding.
 
@@ -380,34 +331,29 @@ Some _potential_ areas for improvement that _are not implemented yet_ are:
 
 * *DRAs*: Support for more than 2 dimensions.
 
-* `CloudSolrClient` could route documents to the correct collection based on the route value instead always picking the
-  latest/first.
+* `CloudSolrClient` could route documents to the correct collection based on the route value instead of always picking the latest/first.
 
-* Presently only updates are routed and queries are distributed to all collections in the alias, but future
-  features might enable routing of the query to the single appropriate collection based on a special parameter or perhaps
-  a filter on the routed field.
+* Presently only updates are routed and queries are distributed to all collections in the alias, but future features might enable routing of the query to the single appropriate collection based on a special parameter or perhaps a filter on the routed field.
 
 * Collections might be constrained by their size instead of or in addition to time or category value.
-  This might be implemented as another type of routed alias, or possibly as an option on the existing routed aliases
+This might be implemented as another type of routed alias, or possibly as an option on the existing routed aliases.
 
-* Option for deletion of aliases that also deletes the underlying collections in one step. Routed Aliases may quickly
-  create more collections than expected during initial testing. Removing them after such events is overly tedious.
+* Option for deletion of aliases that also deletes the underlying collections in one step.
+Routed Aliases may quickly create more collections than expected during initial testing.
+Removing them after such events is overly tedious.
 
 As always, patches and pull requests are welcome!
 
 == Collection Commands and Aliases
-Starting with version 8.1 SolrCloud supports using alias names in collection commands where normally a
-collection name is expected. This works only when the following criteria are satisfied:
+SolrCloud supports using alias names in collection commands where normally a collection name is expected.
+This works only when the following criteria are satisfied:
 
 * a request parameter `followAliases=true` is used
 * an alias must not refer to more than one collection
 * an alias must not refer to a <<Routed Aliases,Routed Alias>>
 
-If all criteria are satisfied then the command will resolve all alias names and operate on the collections the aliases
-refer to as if it was invoked with the collection names instead. Otherwise the command will not be executed and
-an exception will be thrown.
+If all criteria are satisfied, then the command will resolve all alias names and operate on the collections the aliases refer to as if it were invoked with the collection names instead.
+Otherwise the command will not be executed and an exception will be thrown.
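+
+For example, assuming an alias `myAlias` that points to exactly one non-routed collection, a command such as RELOAD could be issued against the alias like this (a hedged sketch with a hypothetical alias name):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=RELOAD&name=myAlias&followAliases=true"
+----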
 
 The `followAliases=true` parameter should be used with care so that the resolved targets are indeed the intended ones.
-In case of multi-level aliases or shadow aliases (an alias with the same name as an existing collection but pointing
-to other collections) the use of this option is strongly discouraged because effects may be difficult to
-predict correctly.
+In case of multi-level aliases or shadow aliases (an alias with the same name as an existing collection but pointing to other collections) the use of this option is strongly discouraged because effects may be difficult to predict correctly.
diff --git a/solr/solr-ref-guide/src/analysis-screen.adoc b/solr/solr-ref-guide/src/analysis-screen.adoc
index ec075c7..39dc213 100644
--- a/solr/solr-ref-guide/src/analysis-screen.adoc
+++ b/solr/solr-ref-guide/src/analysis-screen.adoc
@@ -16,16 +16,56 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The Analysis screen lets you inspect how data will be handled according to the field, field type and dynamic field configurations found in your Schema. You can analyze how content would be handled during indexing or during query processing and view the results separately or at the same time. Ideally, you would want content to be handled consistently, and this screen allows you to validate the settings in the field type or field analysis chains.
+Once you've <<field-type-definitions-and-properties.adoc#,defined a field type in your Schema>>, and specified the analysis steps that you want applied to it, you should test it out to make sure that it behaves the way you expect it to.
 
-Enter content in one or both boxes at the top of the screen, and then choose the field or field type definitions to use for analysis.
+Luckily, there is a very handy page in the Solr <<solr-admin-ui.adoc#,admin interface>> that lets you do just that.
+You can invoke the analyzer for any text field, provide sample input, and display the resulting token stream.
 
-image::images/analysis-screen/analysis_normal.png[image,height=400]
+For example, let's look at some of the "Text" field types available in the `bin/solr -e techproducts` example configuration, and use the Analysis Screen (`\http://localhost:8983/solr/#/techproducts/analysis`) to compare how the tokens produced at index time for the sentence "Running an Analyzer" match up with a slightly different query text of "run my analyzer".
 
-If you click the *Verbose Output* check box, you see more information, including more details on the transformations to the input (such as, convert to lower case, strip extra characters, etc.) including the raw bytes, type and detailed position information at each stage. The information displayed will vary depending on the settings of the field or field type. Each step of the process is displayed in a separate section, with an abbreviation for the tokenizer or filter that is applied in t [...]
+We can begin with `text_ws`, one of the simplest Text field types available:
 
-image::images/analysis-screen/analysis_verbose.png[image,height=400]
+image::images/analysis-screen/analysis_normal.png[Normal Analysis]
 
-In the example screenshot above, several transformations are applied to the input "Running is a sport." The words "is" and "a" have been removed and the word "running" has been changed to its basic form, "run". This is because we are using the field type `text_en` in this scenario, which is configured to remove stop words (small words that usually do not provide a great deal of context) and "stem" terms when possible to find more possible matches (this is particularly helpful with plural [...]
+This shows a simple output of only the tokens produced by each step of analysis (a single step in this case).
+The tokenizer is shown with an abbreviation, hover or click on this to see the full name.
 
-The section <<understanding-analyzers-tokenizers-and-filters.adoc#,Understanding Analyzers, Tokenizers, and Filters>> describes in detail what each option is and how it may transform your data and the section <<running-your-analyzer.adoc#,Running Your Analyzer>> has specific examples for using the Analysis screen.
+If we enable *Verbose Output* by checking the box, more details are shown:
+
+image::images/analysis-screen/analysis_compare_0.png[Verbose Analysis]
+
+By looking at the start and end positions for each term, we can see that the only thing this field type does is tokenize text on whitespace.
+Notice in this image that the term "Running" has a start position of 0 and an end position of 7, while "an" has a start position of 8 and an end position of 10, and "Analyzer" starts at 11 and ends at 19.
+If the whitespace between the terms was also included, the count would be 21; since it is 19, we know that whitespace has been removed from this query.
+
+Note also that the indexed terms and the query terms are still very different.
+"Running" doesn't match "run", "Analyzer" doesn't match "analyzer" (to a computer), and obviously "an" and "my" are totally different words.
+If our objective is to allow queries like "run my analyzer" to match indexed text like "Running an Analyzer" then we will need to pick a different field type with index and query time text analysis that does more processing of the inputs.
+
+In particular we want:
+
+* Case insensitivity, so "Analyzer" and "analyzer" match.
+* Stemming, so words like "Run" and "Running" are considered equivalent terms.
+* Stop Word Pruning, so small words like "an" and "my" don't affect the query.
+
+For our next attempt, let's try the `text_general` field type:
+
+image::images/analysis-screen/analysis_compare_1.png[image]
+
+With the verbose output enabled, we can see how each stage of our new analyzers modifies the tokens it receives before passing them on to the next stage.
+As we scroll down to the final output, we can see that we do start to get a match on "analyzer" from each input string, thanks to the "LCF" stage, which if you hover over with your mouse, you'll see is the `LowerCaseFilter`:
+
+image::images/analysis-screen/analysis_compare_2.png[image]
+
+The `text_general` field type is designed to be generally useful for any language, and it has definitely gotten us closer to our objective than `text_ws` from our first example by solving the problem of case sensitivity.
+It's still not quite what we are looking for because we don't see stemming or stopword rules being applied.
+So now let us try the `text_en` field type:
+
+image::images/analysis-screen/analysis_compare_3.png[image]
+
+Now we can see the "SF" (`StopFilter`) stage of the analyzers solving the problem of removing Stop Words ("an"), and as we scroll down, we also see the "PSF" (`PorterStemFilter`) stage apply stemming rules suitable for our English language input, such that the terms produced by our "index analyzer" and the terms produced by our "query analyzer" match the way we expect.
+
+image::images/analysis-screen/analysis_compare_4.png[image]
+
+
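+For reference, the kind of analyzer chain being visualized here might look roughly like the following sketch (the field type name, filter order, and parameters are illustrative only; consult the actual `text_en` definition in your configset for the authoritative configuration):
+
+[source,xml]
+----
+<!-- Illustrative analyzer chain with stop word removal, lowercasing, and stemming -->
+<fieldType name="text_en_example" class="solr.TextField" positionIncrementGap="100">
+  <analyzer>
+    <tokenizer class="solr.StandardTokenizerFactory"/>
+    <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords_en.txt"/>
+    <filter class="solr.LowerCaseFilterFactory"/>
+    <filter class="solr.PorterStemFilterFactory"/>
+  </analyzer>
+</fieldType>
+----
+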
+At this point, we can continue to experiment with additional inputs, verifying that our analyzers produce matching tokens when we expect them to match, and disparate tokens when we do not expect them to match, as we iterate and tweak our field type configuration.
diff --git a/solr/solr-ref-guide/src/analytics-expression-sources.adoc b/solr/solr-ref-guide/src/analytics-expression-sources.adoc
index 7ac6812..4096afa 100644
--- a/solr/solr-ref-guide/src/analytics-expression-sources.adoc
+++ b/solr/solr-ref-guide/src/analytics-expression-sources.adoc
@@ -77,7 +77,7 @@ There are two possible ways of specifying constant strings, as shown below.
 === Dates
 
 Dates can be specified in the same way as they are in Solr queries. Just use ISO-8601 format.
-For more information, refer to the <<working-with-dates.adoc#,Working with Dates>> section.
+For more information, refer to the <<date-formatting-math.adoc#,Working with Dates>> section.
 
 * `2017-07-17T19:35:08Z`
 
diff --git a/solr/solr-ref-guide/src/analytics.adoc b/solr/solr-ref-guide/src/analytics.adoc
index 3670826..6bcbe96 100644
--- a/solr/solr-ref-guide/src/analytics.adoc
+++ b/solr/solr-ref-guide/src/analytics.adoc
@@ -57,7 +57,7 @@ For these changes to take effect, restart Solr or reload the core or collection.
 == Request Syntax
 
 An Analytics request is passed to Solr with the parameter `analytics` in a request sent to the
-<<requesthandlers-and-searchcomponents-in-solrconfig.adoc#search-handlers,Search Handler>>.
+<<requesthandlers-searchcomponents.adoc#search-handlers,Search Handler>>.
 Since the analytics request is sent inside of a search handler request, it will compute results based on the result set determined by the search handler.
 
 For example, this curl command encodes and POSTs a simple analytics request to the the search handler:
diff --git a/solr/solr-ref-guide/src/analyzers.adoc b/solr/solr-ref-guide/src/analyzers.adoc
index 36ca4c5..ac5b26e 100644
--- a/solr/solr-ref-guide/src/analyzers.adoc
+++ b/solr/solr-ref-guide/src/analyzers.adoc
@@ -20,7 +20,10 @@ An analyzer examines the text of fields and generates a token stream.
 
 Analyzers are specified as a child of the `<fieldType>` element in the `schema.xml` configuration file (in the same `conf/` directory as `solrconfig.xml`).
 
-In normal usage, only fields of type `solr.TextField` or `solr.SortableTextField` will specify an analyzer. The simplest way to configure an analyzer is with a single `<analyzer>` element whose class attribute is a fully qualified Java class name. The named class must derive from `org.apache.lucene.analysis.Analyzer`. For example:
+In normal usage, only fields of type `solr.TextField` or `solr.SortableTextField` will specify an analyzer.
+The simplest way to configure an analyzer is with a single `<analyzer>` element whose class attribute is a fully qualified Java class name.
+The named class must derive from `org.apache.lucene.analysis.Analyzer`.
+For example:
 
 [source,xml]
 ----
@@ -29,9 +32,13 @@ In normal usage, only fields of type `solr.TextField` or `solr.SortableTextField
 </fieldType>
 ----
 
-In this case a single class, `WhitespaceAnalyzer`, is responsible for analyzing the content of the named text field and emitting the corresponding tokens. For simple cases, such as plain English prose, a single analyzer class like this may be sufficient. But it's often necessary to do more complex analysis of the field content.
+In this case a single class, `WhitespaceAnalyzer`, is responsible for analyzing the content of the named text field and emitting the corresponding tokens.
+For simple cases, such as plain English prose, a single analyzer class like this may be sufficient.
+But it's often necessary to do more complex analysis of the field content.
 
-Even the most complex analysis requirements can usually be decomposed into a series of discrete, relatively simple processing steps. As you will soon discover, the Solr distribution comes with a large selection of tokenizers and filters that covers most scenarios you are likely to encounter. Setting up an analyzer chain is very straightforward; you specify a simple `<analyzer>` element (no class attribute) with child elements that name factory classes for the tokenizer and filters to use [...]
+Even the most complex analysis requirements can usually be decomposed into a series of discrete, relatively simple processing steps.
+As you will soon discover, the Solr distribution comes with a large selection of tokenizers and filters that covers most scenarios you are likely to encounter.
+Setting up an analyzer chain is very straightforward; you specify a simple `<analyzer>` element (no class attribute) with child elements that name factory classes for the tokenizer and filters to use, in the order you want them to run.
 
 For example:
 
@@ -71,21 +78,30 @@ Note that classes in the `org.apache.lucene.analysis` package may be referred to
 ====
 --
 
-In this case, no Analyzer class was specified on the `<analyzer>` element. Rather, a sequence of more specialized classes are wired together and collectively act as the Analyzer for the field. The text of the field is passed to the first item in the list (`solr.StandardTokenizerFactory`), and the tokens that emerge from the last one (`solr.EnglishPorterFilterFactory`) are the terms that are used for indexing or querying any fields that use the "nametext" `fieldType`.
+In this case, no Analyzer class was specified on the `<analyzer>` element.
+Rather, a sequence of more specialized classes are wired together and collectively act as the Analyzer for the field.
+The text of the field is passed to the first item in the list (`solr.StandardTokenizerFactory`), and the tokens that emerge from the last one (`solr.EnglishPorterFilterFactory`) are the terms that are used for indexing or querying any fields that use the "nametext" `fieldType`.
 
 .Field Values versus Indexed Terms
 [IMPORTANT]
 ====
-The output of an Analyzer affects the _terms_ indexed in a given field (and the terms used when parsing queries against those fields) but it has no impact on the _stored_ value for the fields. For example: an analyzer might split "Brown Cow" into two indexed terms "brown" and "cow", but the stored value will still be a single String: "Brown Cow"
+The output of an Analyzer affects the _terms_ indexed in a given field (and the terms used when parsing queries against those fields) but it has no impact on the _stored_ value for the fields.
+For example: an analyzer might split "Brown Cow" into two indexed terms "brown" and "cow", but the stored value will still be a single String: "Brown Cow"
 ====
 
 == Analysis Phases
 
-Analysis takes place in two contexts. At index time, when a field is being created, the token stream that results from analysis is added to an index and defines the set of terms (including positions, sizes, and so on) for the field. At query time, the values being searched for are analyzed and the terms that result are matched against those that are stored in the field's index.
+Analysis takes place in two contexts.
+At index time, when a field is being created, the token stream that results from analysis is added to an index and defines the set of terms (including positions, sizes, and so on) for the field.
+At query time, the values being searched for are analyzed and the terms that result are matched against those that are stored in the field's index.
 
-In many cases, the same analysis should be applied to both phases. This is desirable when you want to query for exact string matches, possibly with case-insensitivity, for example. In other cases, you may want to apply slightly different analysis steps during indexing than those used at query time.
+In many cases, the same analysis should be applied to both phases.
+This is desirable when you want to query for exact string matches, possibly with case-insensitivity, for example.
+In other cases, you may want to apply slightly different analysis steps during indexing than those used at query time.
 
-If you provide a simple `<analyzer>` definition for a field type, as in the examples above, then it will be used for both indexing and queries. If you want distinct analyzers for each phase, you may include two `<analyzer>` definitions distinguished with a type attribute. For example:
+If you provide a simple `<analyzer>` definition for a field type, as in the examples above, then it will be used for both indexing and queries.
+If you want distinct analyzers for each phase, you may include two `<analyzer>` definitions distinguished with a type attribute.
+For example:
 
 [.dynamic-tabs]
 --
@@ -129,21 +145,22 @@ If you provide a simple `<analyzer>` definition for a field type, as in the exam
 ====
 --
 
-In this theoretical example, at index time the text is tokenized, the tokens are set to lowercase, any that are not listed in `keepwords.txt` are discarded and those that remain are mapped to alternate values as defined by the synonym rules in the file `syns.txt`. This essentially builds an index from a restricted set of possible values and then normalizes them to values that may not even occur in the original text.
+In this theoretical example, at index time the text is tokenized, the tokens are set to lowercase, any that are not listed in `keepwords.txt` are discarded and those that remain are mapped to alternate values as defined by the synonym rules in the file `syns.txt`.
+This essentially builds an index from a restricted set of possible values and then normalizes them to values that may not even occur in the original text.
 
-At query time, the only normalization that happens is to convert the query terms to lowercase. The filtering and mapping steps that occur at index time are not applied to the query terms. Queries must then, in this example, be very precise, using only the normalized terms that were stored at index time.
+At query time, the only normalization that happens is to convert the query terms to lowercase.
+The filtering and mapping steps that occur at index time are not applied to the query terms.
+Queries must then, in this example, be very precise, using only the normalized terms that were stored at index time.
 
 === Analysis for Multi-Term Expansion
 
-In some types of queries (i.e., Prefix, Wildcard, Regex, etc.) the input provided
-by the user is not natural language intended for Analysis. Things like Synonyms
-or Stop word filtering do not work in a logical way in these types of Queries.
+In some types of queries (e.g., Prefix, Wildcard, Regex) the input provided by the user is not natural language intended for Analysis.
+Things like Synonyms or Stop word filtering do not work in a logical way in these types of Queries.
 
-When Solr needs to perform analysis for a query that results in multi-term
-expansion, then the `normalize` method is called for each factory in the filter
-chain.  Factories that provide filters that do not make sense in this context
-will return their inputs unchanged.  Normalization applies to both CharFilters
-and TokenFilters
+When Solr needs to perform analysis for a query that results in multi-term expansion, then the `normalize` method is called for each factory in the filter chain.
+Factories that provide filters that do not make sense in this context will return their inputs unchanged.
+Normalization applies to both CharFilters and TokenFilters.
 
 For most use cases, this provides the best possible behavior, but if you wish for absolute control over the analysis performed on these types of queries, you may explicitly define a `multiterm` analyzer to use, such as in the following example:
 
diff --git a/solr/solr-ref-guide/src/audit-logging.adoc b/solr/solr-ref-guide/src/audit-logging.adoc
index 3d299d2..86b3baf 100644
--- a/solr/solr-ref-guide/src/audit-logging.adoc
+++ b/solr/solr-ref-guide/src/audit-logging.adoc
@@ -20,12 +20,12 @@ Solr has the ability to log an audit trail of all HTTP requests entering the sys
 Audit loggers are pluggable to suit any possible format or log destination.
 
 [quote]
-An audit trail (also called audit log) is a security-relevant chronological record, set of records, and/or destination and source of records that provide documentary evidence of the sequence of activities that have affected at any time a specific operation, procedure, or event. (https://en.wikipedia.org/wiki/Audit_trail[Wikipedia])
+An audit trail (also called audit log) is a security-relevant chronological record, set of records, and/or destination and source of records that provide documentary evidence of the sequence of activities that have affected at any time a specific operation, procedure, event, or device. (https://en.wikipedia.org/wiki/Audit_trail[Wikipedia])
 
-== Configuration in security.json
+== Configuring Audit Logging
 Audit logging is configured in `security.json` under the `auditlogging` key.
 
-The example `security.json` below configures synchronous audit logging to Solr default log file.
+The example below uses plugin defaults to configure synchronous audit logging to Solr's default log file.
 
 [source,json]
 ----
@@ -36,7 +36,10 @@ The example `security.json` below configures synchronous audit logging to Solr d
 }
 ----
 
-By default any AuditLogger plugin configured will log asynchronously in the background to avoid slowing down the requests. To make audit logging happen synchronously, add the parameter `async: false`. For async logging, you may optionally also configure queue size, number of threads and whether it should block when the queue is full or discard events:
+By default any audit logging plugin will log asynchronously in the background to avoid slowing down the requests.
+To make audit logging happen synchronously, add the parameter `async` with a value of `false`.
+
+When using asynchronous logging, you may optionally also configure queue size, number of threads, and whether it should block when the queue is full or discard events:
 
 [source,json]
 ----
@@ -52,49 +55,130 @@ By default any AuditLogger plugin configured will log asynchronously in the back
 }
 ----
 
-The defaults are `async: true`, `blockAsync: false`, `queueSize: 4096`. The default for `numThreads` is 2, or if the server has more than 4 CPU-cores then we use CPU-cores/2.
+=== Audit Logging Parameters
+These parameters are:
+
+`class`::
++
+[%autowidth,frame=none]
+|===
+s|Required |Default: none
+|===
++
+The audit logging plugin class name.
+Either `solr.SolrLogAuditLoggerPlugin` or `solr.MultiDestinationAuditLogger` (described below in the section <<Chaining Multiple Loggers>>).
+
+`async`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `true`
+|===
++
+Defines if events are logged asynchronously.
+This defaults to `true` to avoid slowing down requests.
+However, if you are confident in the performance characteristics of your system and need events logged synchronously, you can change this to `false`.
+
+`blockAsync`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `false`
+|===
++
+Defines if requests should be blocked if the queue is full.
+The default of `false` will discard unlogged events.
+Only used when `async=true`.
+
+`numThreads`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `2`
+|===
++
+The number of threads available to audit logging.
+If the number of CPU-cores available to the server is higher than 4, then the default is modified to `CPU-cores / 2`.
+Only used when `async=true`.
+
+`queueSize`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `4096`
+|===
++
+The size of the queue.
+Only used when `async=true`.
+
+`eventTypes`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `["REJECTED", "ANONYMOUS_REJECTED", "UNAUTHORIZED", "COMPLETED", "ERROR"]`
+|===
++
+The event types to log.
+See the section <<Event Types>> below for type options.
+
+`muteRules`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+Defines the circumstances when events should not be logged (muted).
+Possible rules can exclude requests from certain users, IPs, paths, or request parameters.
+See the section <<Muting Certain Events>> below for mute rule options.
 
 [#audit-event-types]
 === Event Types
+
+The event types logged can be configured with the `eventTypes` parameter.
+By default only the final event types `REJECTED`, `ANONYMOUS_REJECTED`, `UNAUTHORIZED`, `COMPLETED` and `ERROR` are logged.
+
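+For example, a configuration that only records rejected or failed requests might look something like this sketch:
+
+[source,json]
+----
+{
+  "auditlogging": {
+    "class": "solr.SolrLogAuditLoggerPlugin",
+    "eventTypes": ["REJECTED", "ANONYMOUS_REJECTED", "UNAUTHORIZED", "ERROR"]
+  }
+}
+----
+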
 These are the event types triggered by the framework:
 
 [%header,format=csv,separator=;]
 |===
 EventType;Usage
-AUTHENTICATED;User successfully authenticated
-REJECTED;Authentication request rejected
-ANONYMOUS;Request proceeds with unknown user
-ANONYMOUS_REJECTED;Request from unknown user rejected
-AUTHORIZED;Authorization succeeded
-UNAUTHORIZED;Authorization failed
-COMPLETED;Request completed successfully
-ERROR;Request was not executed due to an error
+`AUTHENTICATED`;User successfully authenticated
+`REJECTED`;Authentication request rejected
+`ANONYMOUS`;Request proceeds with unknown user
+`ANONYMOUS_REJECTED`;Request from unknown user rejected
+`AUTHORIZED`;Authorization succeeded
+`UNAUTHORIZED`;Authorization failed
+`COMPLETED`;Request completed successfully
+`ERROR`;Request was not executed due to an error
 |===
 
-By default only the final event types `REJECTED`, `ANONYMOUS_REJECTED`, `UNAUTHORIZED`, `COMPLETED` and `ERROR` are logged. What eventTypes are logged can be configured with the `eventTypes` configuration parameter.
-
 === Muting Certain Events
-The configuration parameter `muteRules` lets you mute logging for certain events. You may specify multiple rules and combination of rules that will cause muting. You can mute by request type, username, collection name, path, request parameters or IP address. We'll explain through examples:
 
-The below example will mute logging for all `SEARCH` requests as well as all requests made my user `johndoe` or from IP address `192.168.0.10`:
+The configuration parameter `muteRules` lets you mute logging for certain events.
+You may specify multiple rules and combinations of rules that will cause muting.
+You can mute by request type, username, collection name, path, request parameters, or IP address.
+
+The following example uses `muteRules` to mute audit logging for three categories of requests: any `SEARCH` requests, any requests made by user `johndoe`, and any requests from IP address `192.168.0.10`:
 
 [source,json]
 ----
 {
   "auditlogging":{
-    "class": "solr.SolrLogAuditLoggerPlugin"
+    "class": "solr.SolrLogAuditLoggerPlugin",
     "muteRules": [ "type:SEARCH", "user:johndoe", "ip:192.168.0.10" ]
   }
 }
 ----
 
-An mute rule may also be a list, in which case all must be true for muting to happen. The configuration below has three mute rules:
+A mute rule may also be a list, in which case all items in the list must be true for muting to happen.
+The configuration below has three mute rules:
 
 [source,json]
 ----
 {
   "auditlogging":{
-    "class": "solr.SolrLogAuditLoggerPlugin"
+    "class": "solr.SolrLogAuditLoggerPlugin",
     "muteRules": [
       "ip:192.168.0.10", <1>
       [ "path:/admin/collections", "param:action=LIST" ], <2>
@@ -104,24 +188,27 @@ An mute rule may also be a list, in which case all must be true for muting to ha
 }
 ----
 
-<1> The first will mute all events from client IP `192.168.0.10`
-<2> The second rule will mute collection admin requests with `action=LIST`
-<3> The third rule will mute collection admin requests for the collection named `test`
+<1> This will mute all events from client IP `192.168.0.10`.
+<2> This rule will mute Collection API requests with `action=LIST`.
+<3> The final rule will mute Collection API requests for the collection named `test`.
 
 Note how you can mix single string rules with lists of rules that must all match.
 
-*Valid mute rules are:*
+Options for mute rules are:
 
-* `type:<request-type>` (request-type by name: `ADMIN`, `SEARCH`, `UPDATE`, `STREAMING`, `UNKNOWN`)
-* `collection:<collection-name>` (collection by name)
-* `user:<userid>` (user by userid)
-* `path:</path/to/handler>` (request path relative to `/solr` or for search/update requests relative to collection. Path is prefix matched, i.e., `/admin` will mute any sub path as well.
-* `ip:<ip-address>` (IPv4-address)
-* `param:<param>=<value>` (request parameter)
+* `type:<request-type>`: A request-type by name: `ADMIN`, `SEARCH`, `UPDATE`, `STREAMING`, or `UNKNOWN`.
+* `collection:<collection-name>`: A collection by name.
+* `user:<userid>`: A user by userid.
+* `path:</path/to/handler>`: A request path relative to `/solr` or, for search or update requests, relative to the collection.
+Path is prefix matched, i.e., `/admin` will mute any sub-path as well.
+* `ip:<ip-address>`: An IPv4 address.
+* `param:<param>=<value>`: A request parameter.
+This will likely mostly be used in conjunction with the `path` rule, as shown in the example above.
 
 
 === Chaining Multiple Loggers
-Using the `MultiDestinationAuditLogger` you can configure multiple audit logger plugins in a chain, to log to multiple destinations, as follows:
+
+Using the `MultiDestinationAuditLogger`, multiple audit logger plugins can be configured in a chain to log to multiple destinations.
 
 [source,json]
 ----
@@ -138,17 +225,25 @@ Using the `MultiDestinationAuditLogger` you can configure multiple audit logger
 }
 ----
 
+Note that logging to alternate destinations would need to be defined with a custom audit logging plugin.
+See the javadocs for the base class at {solr-javadocs}/core/org/apache/solr/security/AuditLoggerPlugin.html[AuditLoggerPlugin].
+
 == Metrics
-AuditLoggerPlugins record metrics about count and timing of log requests, as well as queue size for async loggers. The metrics keys are all recorded on the `SECURITY` category, and each metric name are prefixed with a scope of `/auditlogging` and the class name of the logger, e.g., `SolrLogAuditLoggerPlugin`. The individual metrics are:
-
-* `count` (type: meter. Records number and rate of audit logs done)
-* `errors` (type: meter. Records number and rate of errors)
-* `lost` (type: meter. Records number and rate of events lost due to queue full and `blockAsync=false`)
-* `requestTimes` (type: timer. Records latency and perceniles for logging performance)
-* `totalTime` (type: counter. Records total time spent)
-* `queueCapacity` (type: gauge. Records the max size of the async logging queue)
-* `queueSize` (type: gauge. Records the number of events currently waiting in the queue)
-* `queuedTime` (type: timer. Records the amount of time events waited in queue. Adding this with requestTimes you get total time from event to logging complete)
-* `async` (type: gauge. Tells whether this logger is in async mode)
-
-TIP: If you expect a very high request rate and have a slow audit logger plugin, you may see that the `queueSize` and `queuedTime` metrics increase, and in worst case start dropping events and see an increase in `lost` count. In this case you may want to increas the `numThreads` setting.
+
+Audit logging plugins record metrics about count and timing of log requests, as well as queue size for async loggers.
+The metrics keys are all recorded on the `SECURITY` category, and each metric name is prefixed with a scope of `/auditlogging` and the class name of the logger, e.g., `SolrLogAuditLoggerPlugin`.
+The individual metrics are:
+
+* `count`: (_meter_) Records number and rate of audit logs written.
+* `errors`: (_meter_) Records number and rate of errors.
+* `lost`: (_meter_) Records number and rate of events lost if the queue is full and `blockAsync=false`.
+* `requestTimes`: (_timer_) Records latency and percentiles for audit logging performance.
+* `totalTime`: (_counter_) Records total time spent logging.
+* `queueCapacity`: (_gauge_) Records the maximum size of the async logging queue.
+* `queueSize`: (_gauge_) Records the number of events currently waiting in the queue.
+* `queuedTime`: (_timer_) Records the amount of time events waited in queue.
+Adding this with the `requestTimes` metric will show the total time from event to logging complete.
+* `async`: (_gauge_) Tells whether this logger is in async mode.
+
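+One quick way to inspect these values is through the Metrics API (a sketch, assuming the default port and node-level metrics; adjust the URL for your installation):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/metrics?group=node&prefix=SECURITY"
+----
+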
+TIP: If you experience a very high request rate and have a slow audit logger plugin, you may see the `queueSize` and `queuedTime` metrics increase, and possibly start dropping events (shown by an increase in `lost` count).
+In this case you may want to increase the `numThreads` setting.
diff --git a/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc b/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc
index 965006b..62a38ac 100644
--- a/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc
+++ b/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc
@@ -1,5 +1,10 @@
-= Configuring Authentication, Authorization and Audit Logging
-:page-children: basic-authentication-plugin, hadoop-authentication-plugin, kerberos-authentication-plugin,  jwt-authentication-plugin, cert-authentication-plugin, rule-based-authorization-plugin, audit-logging
+= Configuring Authentication and Authorization
+:page-children: basic-authentication-plugin, \
+    kerberos-authentication-plugin,  \
+    jwt-authentication-plugin, \
+    cert-authentication-plugin, \
+    hadoop-authentication-plugin, \
+    rule-based-authorization-plugin
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -17,17 +22,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr has security frameworks for supporting authentication, authorization and auditing of users. This allows for verifying a user's identity and for restricting access to resources in a Solr cluster.
+Solr has security frameworks for supporting authentication, authorization and auditing of users.
+This allows for verifying a user's identity and for restricting access to resources in a Solr cluster.
 
 Solr includes some plugins out of the box, and additional plugins can be developed using the authentication, authorization and audit logging frameworks described below.
 
-All authentication, authorization and audit logging plugins can work with Solr whether they are running in SolrCloud mode or standalone mode. All related configuration, including users and permission rules, are stored in a file named `security.json`. When using Solr in standalone mode, this file must be in the `$SOLR_HOME` directory (usually `server/solr`). When using SolrCloud, this file must be located in ZooKeeper.
+All authentication, authorization and audit logging plugins can work with Solr whether it is running as a cluster or a single-node installation.
+All related configuration, including users and permission rules, is stored in a file named `security.json`.
+When running Solr as a user-managed cluster or a single-node installation, this file must be in the `$SOLR_HOME` directory (usually `server/solr`).
+When using SolrCloud, this file must be located in ZooKeeper.
 
-The following section describes how to enable plugins with `security.json` and place them in the proper locations for your mode of operation.
+== Configuring security.json
 
-== Enable Plugins with security.json
-
-All of the information required to initialize either type of security plugin is stored in a `security.json` file. This file contains 3 sections, one each for authentication, authorization, and audit logging.
+All of the information required to initialize security plugins is stored in a `security.json` file.
+This file contains 3 sections, one each for authentication, authorization, and audit logging.
 
 .Sample security.json
 [source,json]
@@ -45,11 +53,14 @@ All of the information required to initialize either type of security plugin is
 }
 ----
 
-The `/security.json` file needs to be in the proper location before a Solr instance comes up so Solr starts with the security plugin enabled. See the section <<Using security.json with Solr>> below for information on how to do this.
+The `/security.json` file needs to be in the proper location before a Solr instance comes up so Solr starts with the security plugin enabled.
+See the section <<Using security.json with Solr>> below for information on how to do this.
 
-Depending on the plugin(s) in use, other information will be stored in `security.json` such as user information or rules to create roles and permissions. This information is added through the APIs for each plugin provided by Solr, or, in the case of a custom plugin, the approach designed by you.
+Depending on the plugin(s) in use, other information will be stored in `security.json` such as user information or rules to create roles and permissions.
+This information is added through the APIs for each plugin provided by Solr, or, in the case of a custom plugin, the approach designed by you.
 
-Here is a more detailed `security.json` example. In this, the Basic authentication and rule-based authorization plugins are enabled, and some data has been added:
+Here is a more detailed `security.json` example.
+In this, the Basic authentication and rule-based authorization plugins are enabled, and some data has been added:
 
 [source,json]
 ----
@@ -68,7 +79,7 @@ Here is a more detailed `security.json` example. In this, the Basic authenticati
 
 == Using security.json with Solr
 
-=== In SolrCloud Mode
+=== In a SolrCloud Cluster
 
 While configuring Solr to use an authentication or authorization plugin, you will need to upload a `security.json` file to ZooKeeper.
 
@@ -79,7 +90,8 @@ Create the file `security.json` with the contents:
 {"authentication": {"class": "org.apache.solr.security.KerberosPlugin"}}
 ----
 
-Note that this example defines the `KerberosPlugin` for authentication. You will want to modify this section as appropriate for the plugin you are using.
+Note that this example defines the `KerberosPlugin` for authentication.
+You will want to modify this section as appropriate for the plugin you are using.
 
 Then use the `bin/solr zk` command to upload the file:
 
@@ -88,27 +100,32 @@ Then use the `bin/solr zk` command to upload the file:
 >bin/solr zk cp ./security.json zk:security.json -z localhost:2181
 ----
 
-NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see <<setting-up-an-external-zookeeper-ensemble#updating-solr-include-files,instructions>>) you can omit `-z <zk host string>` from the above command.
+NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see <<zookeeper-ensemble#updating-solr-include-files,instructions>>) you can omit `-z <zk host string>` from the above command.
 
 [WARNING]
 ====
-Whenever you use any security plugins and store `security.json` in ZooKeeper, we highly recommend that you implement access control in your ZooKeeper nodes. Information about how to enable this is available in the section <<zookeeper-access-control.adoc#,ZooKeeper Access Control>>.
+Whenever you use any security plugins and store `security.json` in ZooKeeper, we highly recommend that you implement access control in your ZooKeeper nodes.
+Information about how to enable this is available in the section <<zookeeper-access-control.adoc#,ZooKeeper Access Control>>.
 ====
 
-Once `security.json` has been uploaded to ZooKeeper, you should use the appropriate APIs for the plugins you're using to update it. You can edit it manually, but you must take care to remove any version data so it will be properly updated across all ZooKeeper nodes. The version data is found at the end of the `security.json` file, and will appear as the letter "v" followed by a number, such as `{"v":138}`.
+Once `security.json` has been uploaded to ZooKeeper, you should use the appropriate APIs for the plugins you're using to update it.
+You can edit it manually, but you must take care to remove any version data so it will be properly updated across all ZooKeeper nodes.
+The version data is found at the end of the `security.json` file, and will appear as the letter "v" followed by a number, such as `{"v":138}`.
 
-=== In Standalone Mode
+=== In a User-Managed Cluster or Single-Node Installation
 
-When running Solr in standalone mode, you need to create the `security.json` file and put it in the `$SOLR_HOME` directory for your installation (this is the same place you have located `solr.xml` and is usually `server/solr`).
+When running Solr in either a user-managed cluster or a single-node installation, you create the `security.json` file and put it in the `$SOLR_HOME` directory for your installation (this is the same place you have located `solr.xml` and is usually `server/solr`).
 
-If you are using <<legacy-scaling-and-distribution.adoc#,Legacy Scaling and Distribution>>, you will need to place `security.json` on each node of the cluster.
+With a user-managed cluster, you will need to place `security.json` on each node of the cluster.
 
-You can use the authentication and authorization APIs, but if you are using the legacy scaling model, you will need to make the same API requests on each node separately. You can also edit `security.json` by hand if you prefer.
+You can use the authentication and authorization APIs, but with a user-managed cluster you will need to make the same API requests on each node separately.
+You can also edit `security.json` by hand if you prefer.
 
 [#configuring-authentication]
 == Authentication
 
-Authentication plugins help in securing the endpoints of Solr by authenticating incoming requests. A custom plugin can be implemented by extending the AuthenticationPlugin class.
+Authentication plugins help in securing the endpoints of Solr by authenticating incoming requests.
+A custom plugin can be implemented by extending the AuthenticationPlugin class.
 
 An authentication plugin consists of two parts:
 
@@ -117,8 +134,8 @@ An authentication plugin consists of two parts:
 
 === Enabling an Authentication Plugin
 
-* Specify the authentication plugin in `/security.json` as in this example:
-+
+Specify the authentication plugin in `/security.json` as in this example:
+
 [source,json]
 ----
 {
@@ -127,8 +144,10 @@ An authentication plugin consists of two parts:
     "other_data" : "..."}
 }
 ----
-* All of the content in the authentication block of `security.json` would be passed on as a map to the plugin during initialization.
-* An authentication plugin can also be used with a standalone Solr instance by passing in `-DauthenticationPlugin=<plugin class name>` during startup.
+
+All of the content in the `authentication` block of `security.json` will be passed as a map to the plugin during initialization.
+
+An authentication plugin can also be used with a single-node Solr instance by passing in `-DauthenticationPlugin=<plugin class name>` during startup.
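+
+For example, a single-node startup using the Basic authentication plugin class might look like this (a sketch; substitute the class of the plugin you are using):
+
+[source,bash]
+----
+bin/solr start -DauthenticationPlugin=org.apache.solr.security.BasicAuthPlugin
+----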
 
 Currently available authentication plugins are:
 
@@ -141,8 +160,8 @@ An authorization plugin can be written for Solr by extending the {solr-javadocs}
 
 === Enabling an Authorization Plugin
 
-* Make sure that the plugin implementation is in the classpath.
-* The plugin can then be initialized by specifying the same in `security.json` in the following manner:
+The plugin implementation must be in the classpath.
+The plugin can then be initialized by declaring it in `security.json` in the following manner:
 
 [source,json]
 ----
@@ -153,45 +172,52 @@ An authorization plugin can be written for Solr by extending the {solr-javadocs}
 }
 ----
 
-All of the content in the `authorization` block of `security.json` would be passed on as a map to the plugin during initialization.
+All of the content in the `authorization` block of `security.json` will be passed on as a map to the plugin during initialization.
 
 [IMPORTANT]
 ====
-Reloading the plugin isn't yet supported and requires a restart of the Solr installation (meaning, the JVM should be restarted, not simply a core reload).
+Reloading a plugin isn't yet supported and requires a restart of the Solr installation (meaning, the JVM should be restarted, not simply a core reload).
 ====
 
 Currently available authorization plugins are:
 
 include::securing-solr.adoc[tag=list-of-authorization-plugins]
 
-[#configuring-audit-logging]
-== Audit Logging
-
-<<audit-logging.adoc#,Audit logging>> plugins help you keep an audit trail of events happening in your Solr cluster.
-Audit logging may e.g., ship data to an external audit service.
-A custom plugin can be implemented by extending the `AuditLoggerPlugin` class.
-
 == Authenticating in the Admin UI
 
-Whenever an authentication plugin is enabled, authentication is also required for all or some operations in the Admin UI. The Admin UI is an AngularJS application running inside your browser, and is treated as any other external client by Solr.
+Whenever an authentication plugin is enabled, authentication is also required for all or some operations in the Admin UI.
+The Admin UI is an AngularJS application running inside your browser, and is treated as any other external client by Solr.
 
 When authentication is required, the Admin UI will present you with a login dialogue. The authentication plugins currently supported by the Admin UI are:
 
 * <<basic-authentication-plugin.adoc#,Basic Authentication Plugin>>
 * <<jwt-authentication-plugin.adoc#,JWT Authentication Plugin>>
 
-If your plugin of choice is not supported, the Admin UI will still let you perform unrestricted operations, while for restricted operations you will need to interact with Solr by sending HTTP requests instead of through the graphical user interface of the Admin UI. All operations supported by Admin UI can be performed through Solr's RESTful APIs.
+If your plugin of choice is not supported, the Admin UI will still let you perform unrestricted operations, while for restricted operations you will need to interact with Solr by sending HTTP requests instead of through the graphical user interface of the Admin UI.
+All operations supported by Admin UI can be performed through Solr's APIs.
 
 == Securing Inter-Node Requests
 
-There are a lot of requests that originate from the Solr nodes itself. For example, requests from overseer to nodes, recovery threads, etc. We call these 'inter-node' request. Solr has a special built-in `PKIAuthenticationPlugin` (see below) that will always be available to secure inter-node traffic.
+There are a lot of requests that originate from the Solr nodes themselves.
+For example, requests from the overseer to nodes, recovery threads, etc.
+We call these 'inter-node' requests.
+Solr has a built-in `PKIAuthenticationPlugin` (described below) that is always available to secure inter-node traffic.
 
-Each Authentication plugin may also decide to secure inter-node requests on its own. They may do this through the so-called `HttpClientBuilder` mechanism, or they may alternatively choose on a per-request basis whether to delegate to PKI or not by overriding a `interceptInternodeRequest()` method from the base class, where any HTTP headers can be set.
+Each Authentication plugin may also decide to secure inter-node requests on its own.
+They may do this through the so-called `HttpClientBuilder` mechanism, or they may alternatively choose on a per-request basis whether to delegate to PKI or not by overriding an `interceptInternodeRequest()` method from the base class, where any HTTP headers can be set.
 
 === PKIAuthenticationPlugin
 
-The `PKIAuthenticationPlugin` provides a built-in authentication mechanism where each Solr node is a super user and is fully trusted by other Solr nodes through the use of Public Key Infrastructure (PKI). Each Authentication plugn may choose to delegate all or some inter-node traffic to the PKI plugin.
+The `PKIAuthenticationPlugin` provides a built-in authentication mechanism where each Solr node is a super user and is fully trusted by other Solr nodes through the use of Public Key Infrastructure (PKI).
+Each Authentication plugin may choose to delegate all or some inter-node traffic to the PKI plugin.
 
-For each outgoing request `PKIAuthenticationPlugin` adds a special header `'SolrAuth'` which carries the timestamp and principal encrypted using the private key of that node. The public key is exposed through an API so that any node can read it whenever it needs it. Any node who gets the request with that header, would get the public key from the sender and decrypt the information. If it is able to decrypt the data, the request trusted. It is invalid if the timestamp is more than 5 secs  [...]
+For each outgoing request `PKIAuthenticationPlugin` adds a special header `'SolrAuth'` which carries the timestamp and principal encrypted using the private key of that node.
+The public key is exposed through an API so that any node can read it whenever it needs it.
+Any node that receives a request with that header retrieves the public key from the sender and decrypts the information.
+If it is able to decrypt the data, the request is trusted.
+The request is invalid if the timestamp is more than 5 seconds old.
+This assumes that the clocks of different nodes in the cluster are synchronized.
+Only traffic from other Solr nodes registered with ZooKeeper is trusted.
 
-The timeout is configurable through a system property called `pkiauth.ttl`. For example, if you wish to bump up the time-to-live to 10 seconds (10000 milliseconds), start each node with a property `'-Dpkiauth.ttl=10000'`.
+The timeout is configurable through a system property called `pkiauth.ttl`.
+For example, if you wish to increase the time-to-live to 10 seconds (10000 milliseconds), start each node with a property `'-Dpkiauth.ttl=10000'`.
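+
+If you manage startup options through the include file, one way to set this permanently is via `SOLR_OPTS` (a sketch for `solr.in.sh`; the equivalent in `solr.in.cmd` uses `set SOLR_OPTS=...`):
+
+[source,bash]
+----
+SOLR_OPTS="$SOLR_OPTS -Dpkiauth.ttl=10000"
+----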
diff --git a/solr/solr-ref-guide/src/making-and-restoring-backups.adoc b/solr/solr-ref-guide/src/backup-restore.adoc
similarity index 78%
rename from solr/solr-ref-guide/src/making-and-restoring-backups.adoc
rename to solr/solr-ref-guide/src/backup-restore.adoc
index 4a156f1..5c05cfd 100644
--- a/solr/solr-ref-guide/src/making-and-restoring-backups.adoc
+++ b/solr/solr-ref-guide/src/backup-restore.adoc
@@ -1,4 +1,4 @@
-= Making and Restoring Backups
+= Backup and Restore
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -18,31 +18,42 @@
 
 If you are worried about data loss, and of course you _should_ be, you need a way to back up your Solr indexes so that you can recover quickly in case of catastrophic failure.
 
-Solr provides two approaches to backing up and restoring Solr cores or collections, depending on how you are running Solr. If you run in SolrCloud mode, you will use the Collections API. If you run Solr in standalone mode, you will use the replication handler.
+Solr provides two approaches to backing up and restoring Solr cores or collections, depending on how you are running Solr.
+If you run a SolrCloud cluster, you will use the Collections API.
+If you run a user-managed cluster or a single-node installation, you will use the replication handler.
 
 [NOTE]
 ====
-Backups (and Snapshots) capture data that has been <<near-real-time-searching.adoc#commits-and-searching,_hard_ commited>>. Commiting changes using `softCommit=true` may result in changes that are visible in search results but not included in subsequent backups.
+Backups (and Snapshots) capture data that has been <<commits-transaction-logs.adoc#hard-commits-vs-soft-commits,hard committed>>.
+Committing changes using `softCommit=true` may result in changes that are visible in search results but not included in subsequent backups.
 
 Likewise, committing changes using `openSearcher=false` may result in changes committed to disk and included in subsequent backups, even if they are not currently visible in search results.
 ====
 
-== SolrCloud Backups
+== SolrCloud Clusters
 
-Support for backups when running SolrCloud is provided with the <<collection-management.adoc#,Collections API>>. This allows the backups to be generated across multiple shards, and restored to the same number of shards and replicas as the original collection.
+Support for backups in SolrCloud is provided with the <<collection-management.adoc#,Collections API>>.
+This allows the backups to be generated across multiple shards, and restored to the same number of shards and replicas as the original collection.
 
 NOTE: SolrCloud Backup/Restore requires a shared file system mounted at the same path on all nodes, or HDFS.
 
 Four different API commands are supported:
 
-* `action=BACKUP`: This command backs up Solr indexes and configurations. More information is available in the section <<collection-management.adoc#backup,Backup Collection>>.
-* `action=RESTORE`: This command restores Solr indexes and configurations. More information is available in the section <<collection-management.adoc#restore,Restore Collection>>.
-* `action=LISTBACKUP`: This command lists the backup points available at a specified location, displaying metadata for each.  More information is available in the section <<collection-management.adoc#listbackup,List Backups>>.
-* `action=DELETEBACKUP`: This command allows deletion of backup files or whole backups.  More information is available in the section <<collection-management.adoc#deletebackup,Delete Backups>>.
+* `action=BACKUP`: This command backs up Solr indexes and configurations.
+More information is available in the section <<collection-management.adoc#backup,Backup Collection>>.
+* `action=RESTORE`: This command restores Solr indexes and configurations.
+More information is available in the section <<collection-management.adoc#restore,Restore Collection>>.
+* `action=LISTBACKUP`: This command lists the backup points available at a specified location, displaying metadata for each.
+More information is available in the section <<collection-management.adoc#listbackup,List Backups>>.
+* `action=DELETEBACKUP`: This command allows deletion of backup files or whole backups.
+More information is available in the section <<collection-management.adoc#deletebackup,Delete Backups>>.
 
-== Standalone Mode Backups
+== User-Managed Clusters and Single-Node Installations
 
-Backups and restoration uses Solr's replication handler. Out of the box, Solr includes implicit support for replication so this API can be used. Configuration of the replication handler can, however, be customized by defining your own replication handler in `solrconfig.xml`. For details on configuring the replication handler, see the section <<index-replication.adoc#configuring-the-replicationhandler,Configuring the ReplicationHandler>>.
+Backups and restoration use Solr's replication handler.
+Out of the box, Solr includes implicit support for replication so this API can be used.
+Configuration of the replication handler can, however, be customized by defining your own replication handler in `solrconfig.xml`.
+For details on configuring the replication handler, see the section <<user-managed-index-replication.adoc#configuring-the-replicationhandler,Configuring the ReplicationHandler>>.
 
 === Backup API
 
@@ -56,25 +67,63 @@ You can trigger a back-up with an HTTP command like this (replace "gettingstarte
 http://localhost:8983/solr/gettingstarted/replication?command=backup
 ----
 
-The `backup` command is an asynchronous call, and it will represent data from the latest index commit point. All indexing and search operations will continue to be executed against the index as usual.
+The `backup` command is an asynchronous call, and it will represent data from the latest index commit point.
+All indexing and search operations will continue to be executed against the index as usual.
 
-Only one backup call can be made against a core at any point in time. While an ongoing backup operation is happening subsequent calls for restoring will throw an exception.
+Only one backup call can be made against a core at one time.
+While a backup operation is in progress, subsequent calls for restoring will throw an exception.
 
 The backup request can also take the following additional parameters:
 
 `location`::
-The path where the backup will be created. If the path is not absolute then the backup path will be relative to Solr's instance directory.
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The path where the backup will be created.
+If the path is not absolute then the backup path will be relative to Solr's instance directory.
 
 `name`::
-The snapshot will be created in a directory called `snapshot.<name>`. If a name is not specified then the directory name will have the following format: `snapshot.<yyyyMMddHHmmssSSS>`.
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The snapshot will be created in a directory called `snapshot.<name>`.
+If a name is not specified then the directory name will have the following format: `snapshot.<_yyyyMMddHHmmssSSS_>`.
 
 `numberToKeep`::
-The number of backups to keep. If `maxNumberOfBackups` has been specified on the replication handler in `solrconfig.xml`, `maxNumberOfBackups` is always used and attempts to use `numberToKeep` will cause an error. Also, this parameter is not taken into consideration if the backup name is specified. More information about `maxNumberOfBackups` can be found in the section <<index-replication.adoc#configuring-the-replicationhandler,Configuring the ReplicationHandler>>.
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The number of backups to keep.
+If `maxNumberOfBackups` has been specified on the replication handler in `solrconfig.xml`, `maxNumberOfBackups` is always used and attempts to use `numberToKeep` will cause an error.
+Also, this parameter is not taken into consideration if the backup name is specified.
+More information about `maxNumberOfBackups` can be found in the section <<user-managed-index-replication.adoc#configuring-the-replicationhandler,Configuring the ReplicationHandler>>.
 
 `repository`::
-The name of the repository to be used for the backup. If no repository is specified then the local filesystem repository will be used automatically.
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The name of the repository to be used for the backup.
+If no repository is specified then the local filesystem repository will be used automatically.
 
 `commitName`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
 The name of the commit which was used while taking a snapshot using the CREATESNAPSHOT command.
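+
+For example, a backup request combining some of these parameters might look like this (the `location` and `name` values are only illustrative):
+
+[source,text]
+----
+http://localhost:8983/solr/gettingstarted/replication?command=backup&name=nightly&location=/var/backups/solr
+----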
 
 === Backup Status
@@ -113,20 +162,44 @@ You can restore from a backup with a command like this:
 http://localhost:8983/solr/gettingstarted/replication?command=restore&name=backup_name
 ----
 
-This will restore the named index snapshot into the current core. Searches will start reflecting the snapshot data once the restore is complete.
+This will restore the named index snapshot into the current core.
+Searches will start reflecting the snapshot data once the restore is complete.
 
 The `restore` request can take these additional parameters:
 
 `location`::
-The location of the backup snapshot file. If not specified, it looks for backups in Solr's data directory.
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The location of the backup snapshot file.
+If not specified, it looks for backups in Solr's data directory.
 
 `name`::
-The name of the backup index snapshot to be restored. If the name is not provided it looks for backups with `snapshot.<timestamp>` format in the location directory. It picks the latest timestamp backup in that case.
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The name of the backup index snapshot to be restored.
+If the name is not provided it looks for backups with `snapshot.<timestamp>` format in the location directory.
+It picks the latest timestamp backup in that case.
 
 `repository`::
-The name of the repository to be used for the backup. If no repository is specified then the local filesystem repository will be used automatically.
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The name of the repository to be used for the backup.
+If no repository is specified then the local filesystem repository will be used automatically.
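+
+For example, to restore the most recent snapshot found at a particular location (an illustrative path), omit `name` and pass only `location`:
+
+[source,text]
+----
+http://localhost:8983/solr/gettingstarted/replication?command=restore&location=/var/backups/solr
+----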
 
-The `restore` command is an asynchronous call. Once the restore is complete the data reflected will be of the backed up index which was restored.
+The `restore` command is an asynchronous call.
+Once the restore is complete, the data reflected will be that of the backed-up index which was restored.
 
 Only one `restore` call can be made against a core at one point in time. While an ongoing restore operation is happening, subsequent calls for restoring will throw an exception.
 
@@ -159,7 +232,8 @@ The status value can be "In Progress", "success" or "failed". If it failed then
 
 === Create Snapshot API
 
-The snapshot functionality is different from the backup functionality as the index files aren't copied anywhere. The index files are snapshotted in the same index directory and can be referenced while taking backups.
+The snapshot functionality is different from the backup functionality as the index files aren't copied anywhere.
+The index files are snapshotted in the same index directory and can be referenced while taking backups.
 
 You can trigger a snapshot command with an HTTP command like this (replace "techproducts" with the name of the core you are working with):
 
@@ -172,11 +246,31 @@ http://localhost:8983/solr/admin/cores?action=CREATESNAPSHOT&core=techproducts&c
 The `CREATESNAPSHOT` request parameters are:
 
 `commitName`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
 The name to store the snapshot as.
 
-`core`:: The name of the core to perform the snapshot on.
+`core`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The name of the core to perform the snapshot on.
 
-`async`:: Request ID to track this action which will be processed asynchronously.
+`async`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+Request ID to track this action which will be processed asynchronously.
 
 === List Snapshot API
 
@@ -193,9 +287,21 @@ http://localhost:8983/solr/admin/cores?action=LISTSNAPSHOTS&core=techproducts&co
 The list snapshot request parameters are:
 
 `core`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
 The name of the core whose snapshots we want to list.
 
 `async`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
 Request ID to track this action which will be processed asynchronously.
 
 === Delete Snapshot API
@@ -213,12 +319,30 @@ http://localhost:8983/solr/admin/cores?action=DELETESNAPSHOT&core=techproducts&c
 The delete snapshot request parameters are:
 
 `commitName`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
 Specify the commit name to be deleted.
 
 `core`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
 The name of the core whose snapshot we want to delete.
 
 `async`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
 Request ID to track this action which will be processed asynchronously.
 
 == Backup/Restore Storage Repositories
diff --git a/solr/solr-ref-guide/src/basic-authentication-plugin.adoc b/solr/solr-ref-guide/src/basic-authentication-plugin.adoc
index fd7c724..5751f67 100644
--- a/solr/solr-ref-guide/src/basic-authentication-plugin.adoc
+++ b/solr/solr-ref-guide/src/basic-authentication-plugin.adoc
@@ -16,19 +16,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr can support Basic authentication for users with the use of the BasicAuthPlugin.
+Solr can support Basic authentication for users with the use of the `BasicAuthPlugin`.
 
-An authorization plugin is also available to configure Solr with permissions to perform various activities in the system. The authorization plugin is described in the section <<rule-based-authorization-plugin.adoc#,Rule-Based Authorization Plugin>>.
+This plugin only provides user authentication.
+To control user permissions, you may need to configure an authorization plugin as described in the section <<rule-based-authorization-plugin.adoc#,Rule-Based Authorization Plugin>>.
 
 == Enable Basic Authentication
 
-To use Basic authentication, you must first create a `security.json` file. This file and where to put it is described in detail in the section <<authentication-and-authorization-plugins.adoc#enable-plugins-with-security-json,Enable Plugins with security.json>>.
+To use Basic authentication, you must first create a `security.json` file.
+This file and where to put it is described in detail in the section <<authentication-and-authorization-plugins.adoc#configuring-security-json,Configuring security.json>>.
 
-For Basic authentication, the `security.json` file must have an `authentication` part which defines the class being used for authentication. Usernames and passwords (as a sha256(password+salt) hash) could be added when the file is created, or can be added later with the Basic authentication API, described below.
+For Basic authentication, `security.json` must have an `authentication` block which defines the class being used for authentication.
+Usernames and passwords (as a sha256(password+salt) hash) can be added when the file is created, or added later with the Authentication API, described below.
 
-The `authorization` part is not related to Basic authentication, but is a separate authorization plugin designed to support fine-grained user access control. For more information, see the section <<rule-based-authorization-plugin.adoc#,Rule-Based Authorization Plugin>>.
-
-An example `security.json` showing both sections is shown below to show how these plugins can work together:
+An example `security.json` showing `authentication` and `authorization` blocks is shown below to show how authentication and authorization plugins can work together:
 
 [source,json]
 ----
@@ -43,51 +44,62 @@ An example `security.json` showing both sections is shown below to show how thes
 "authorization":{
    "class":"solr.RuleBasedAuthorizationPlugin",
    "permissions":[{"name":"security-edit",
-      "role":"admin"}], <6>
-   "user-role":{"solr":"admin"} <7>
+      "role":"admin"}],
+   "user-role":{"solr":"admin"}
 }}
 ----
 
-There are several things defined in this file:
+There are several options defined in this example:
 
-<1> Basic authentication and rule-based authorization plugins are enabled.
+<1> The first block defines the authentication plugin to be used and its parameters.
 <2> The parameter `"blockUnknown":true` means that unauthenticated requests are not allowed to pass through.
 <3> A user called 'solr', with a password `'SolrRocks'` has been defined.
 <4> We override the `realm` property to display different text on the login prompt.
 <5> The parameter `"forwardCredentials":false` means we let Solr's PKI authentication handle distributed requests instead of forwarding the Basic Auth header.
-<6> The 'admin' role has been defined, and it has permission to edit security settings.
-<7> The 'solr' user has been defined to the 'admin' role.
 
-Save your settings to a file called `security.json` locally. If you are using Solr in standalone mode, you should put this file in `$SOLR_HOME`.
+Save your settings to a file called `security.json` locally.
+If you are using Solr in a single-node installation, you should put this file in `$SOLR_HOME`.
 
-If `blockUnknown` does not appear in the `security.json` file, it will default to `true`. This has the effect of requiring authentication for HTTP access to Solr. In some cases, you may not want authentication after enabling the plugin; for example, if you want to have `security.json` in place but aren't ready to enable authentication. However, you will want to ensure that `blockUnknown` is set to `true` or omitted entirely in order for authentication to be enforced for all requests to y [...]
+If `blockUnknown` is not defined in the `security.json` file, it will default to `true`.
+This has the effect of requiring authentication for HTTP access to Solr.
+In some cases, you may not want authentication after enabling the plugin; for example, if you want to have `security.json` in place but aren't ready to enable authentication.
+However, you will want to ensure that `blockUnknown` is set to `true` or omitted entirely in order for authentication to be enforced for all requests to your system.
 
 If `realm` is not defined, it will default to `solr`.
 
-If you are using SolrCloud, you must upload `security.json` to ZooKeeper. An example command and more information about securing your setup can be found at <<authentication-and-authorization-plugins#in-solrcloud-mode,Authentication and Authorization Plugins In SolrCloud Mode>>.
+If you are using SolrCloud, you must upload `security.json` to ZooKeeper.
+An example command and more information about securing your setup can be found at <<authentication-and-authorization-plugins#in-a-solrcloud-cluster,Authentication and Authorization Plugins In a SolrCloud Cluster>>.
 
 === Caveats
 
 There are a few things to keep in mind when using the Basic authentication plugin.
 
-* Credentials are sent in plain text by default. It's recommended to use SSL for communication when Basic authentication is enabled, as described in the section <<enabling-ssl.adoc#,Enabling SSL>>.
-* A user who has access to write permissions to `security.json` will be able to modify all the permissions and how users have been assigned permissions. Special care should be taken to only grant access to editing security to appropriate users.
-* Your network should, of course, be secure. Even with Basic authentication enabled, you should not unnecessarily expose Solr to the outside world.
+* Credentials are sent in plain text by default.
+It's recommended to use SSL for communication when Basic authentication is enabled, as described in the section <<enabling-ssl.adoc#,Enabling SSL>>.
+
+* A user who has write access to `security.json` will be able to modify all permissions and user permission assignments.
+Special care should be taken to only grant access to editing security to appropriate users.
+
+* Your network should, of course, be secure.
+Even with Basic authentication enabled, you should not unnecessarily expose Solr to the outside world.
 
 == Editing Basic Authentication Plugin Configuration
 
-An Authentication API allows modifying user IDs and passwords. The API provides an endpoint with specific commands to set user details or delete a user.
+An Authentication API allows modifying user IDs and passwords.
+The API provides an endpoint with specific commands to set user details or delete a user.
 
 === API Entry Point
 
 * v1: `\http://localhost:8983/solr/admin/authentication`
 * v2: `\http://localhost:8983/api/cluster/security/authentication`
 
-This endpoint is not collection-specific, so users are created for the entire Solr cluster. If users need to be restricted to a specific collection, that can be done with the authorization rules.
+This endpoint is not collection-specific, so users are created for the entire Solr cluster.
+If users need to be restricted to a specific collection, that can be done with the authorization rules.
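+
+For example, with the <<rule-based-authorization-plugin.adoc#,Rule-Based Authorization Plugin>> a permission can be scoped to a single collection; a minimal sketch (the role name here is only illustrative, see that section for the full syntax and predefined permission names) might look like:
+
+[source,json]
+----
+{"name": "read", "collection": "techproducts", "role": "techproducts-reader"}
+----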
 
 === Add a User or Edit a Password
 
-The `set-user` command allows you to add users and change their passwords. For example, the following defines two users and their passwords:
+The `set-user` command allows you to add users and change their passwords.
+For example, the following defines two users and their passwords:
 
 [.dynamic-tabs]
 --
@@ -113,7 +125,9 @@ curl --user solr:SolrRocks http://localhost:8983/api/cluster/security/authentica
 
 === Delete a User
 
-The `delete-user` command allows you to remove a user. The user password does not need to be sent to remove a user. In the following example, we've asked that user IDs 'tom' and 'harry' be removed from the system.
+The `delete-user` command allows you to remove a user.
+The user password does not need to be sent to remove a user.
+In the following example, we've asked that user IDs 'tom' and 'harry' be removed from the system.
 
 [.dynamic-tabs]
 --
@@ -138,7 +152,8 @@ curl --user solr:SolrRocks http://localhost:8983/api/cluster/security/authentica
 
 === Set a Property
 
-Set properties for the authentication plugin. The currently supported properties for the Basic Authentication plugin are `blockUnknown`, `realm` and `forwardCredentials`.
+Set properties for the authentication plugin.
+The currently supported properties for the Basic Authentication plugin are `blockUnknown`, `realm`, and `forwardCredentials`.
 
 [.dynamic-tabs]
 --
@@ -163,7 +178,8 @@ curl --user solr:SolrRocks http://localhost:8983/api/cluster/security/authentica
 ====
 --
 
-The authentication realm defaults to `solr` and is displayed in the `WWW-Authenticate` HTTP header and in the Admin UI login page. To change the realm, set the `realm` property:
+The authentication realm defaults to `solr` and is displayed in the `WWW-Authenticate` HTTP header and in the Admin UI login page.
+To change the realm, set the `realm` property:
 
 [.dynamic-tabs]
 --
@@ -211,10 +227,12 @@ req.setBasicAuthCredentials(userName, password);
 QueryResponse rsp = req.process(solrClient);
 ----
 
-While this is method is simple, it can often be inconvenient to ensure the credentials are provided everywhere they're needed.  It also doesn't work with the many `SolrClient` methods which don't consume `SolrRequest` objects.
+While this method is simple, it can often be inconvenient to ensure the credentials are provided everywhere they're needed.
+It also doesn't work with the many `SolrClient` methods which don't consume `SolrRequest` objects.
 
 === Per-Client Credentials
-Http2SolrClient supports setting the credentials at the client level when building it. This will ensure all requests issued with this particular client get the Basic Authentication headers set.
+`Http2SolrClient` supports setting the credentials at the client level when building it.
+This will ensure all requests issued with this particular client get the Basic Authentication headers set.
 
 [source,java]
 ----
@@ -234,12 +252,15 @@ QueryResponse rsp = req.process(client);
 ----
 
 === Global (JVM) Basic Auth Credentials
+
 Alternatively, users can use SolrJ's `PreemptiveBasicAuthClientBuilderFactory` to add basic authentication credentials to _all_ requests automatically.
 To enable this feature, users should set the following system property `-Dsolr.httpclient.builder.factory=org.apache.solr.client.solrj.impl.PreemptiveBasicAuthClientBuilderFactory`.
 `PreemptiveBasicAuthClientBuilderFactory` allows applications to provide credentials in two different ways:
 
-. The `basicauth` system property can be passed, containing the credentials directly (e.g., `-Dbasicauth=username:password`).  This option is straightforward, but may expose the credentials in the command line, depending on how they're set.
-. The `solr.httpclient.config` system property can be passed, containing a path to a properties file holding the credentials.  Inside this file the username and password can be specified as `httpBasicAuthUser` and `httpBasicAuthPassword`, respectively.
+. The `basicauth` system property can be passed, containing the credentials directly (e.g., `-Dbasicauth=username:password`).
+This option is straightforward, but may expose the credentials in the command line, depending on how they're set.
+. The `solr.httpclient.config` system property can be passed, containing a path to a properties file holding the credentials.
+Inside this file the username and password can be specified as `httpBasicAuthUser` and `httpBasicAuthPassword`, respectively.
 +
 [source,bash]
 ----
@@ -249,10 +270,19 @@ httpBasicAuthPassword=secretPassword
 
 == Using the Solr Control Script with Basic Auth
 
-Add the following line to the `solr.in.sh` or `solr.in.cmd` file. This example tells the `bin/solr` command line to to use "basic" as the type of authentication, and to pass credentials with the user-name "solr" and password "SolrRocks":
+Once Basic authentication is enabled, requests made with the Solr Control Script (`bin/solr`) must also include user credentials.
+To ensure this, add the following line to the `solr.in.sh` or `solr.in.cmd` file.
+
+This example tells the `bin/solr` command line to use "basic" as the type of authentication, and to pass credentials with the username "solr" and password "SolrRocks":
 
 [source,bash]
 ----
 SOLR_AUTH_TYPE="basic"
 SOLR_AUTHENTICATION_OPTS="-Dbasicauth=solr:SolrRocks"
 ----
+
+Alternatively, the `SOLR_AUTHENTICATION_OPTS` property can take a path to a file, as in:
+
+[source,bash]
+----
+SOLR_AUTH_TYPE="basic"
+SOLR_AUTHENTICATION_OPTS="-Dsolr.httpclient.config=/path/to/solr-{solr-docs-version}.0/server/solr/basicAuth.conf"
+----
diff --git a/solr/solr-ref-guide/src/block-join-query-parser.adoc b/solr/solr-ref-guide/src/block-join-query-parser.adoc
new file mode 100644
index 0000000..47ab4bf
--- /dev/null
+++ b/solr/solr-ref-guide/src/block-join-query-parser.adoc
@@ -0,0 +1,210 @@
+= Block Join Query Parser
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License
+
+There are two query parsers that support block joins.
+These parsers allow indexing and searching for relational content that has been <<indexing-nested-documents.adoc#,indexed as Nested Documents>>.
+
+The example usage of the query parsers below assumes the following documents have been indexed:
+
+[source,xml]
+----
+<add>
+  <doc>
+    <field name="id">1</field>
+    <field name="content_type">parent</field>
+    <field name="title">Solr has block join support</field>
+    <doc>
+      <field name="id">2</field>
+      <field name="content_type">child</field>
+      <field name="comments">SolrCloud supports it too!</field>
+    </doc>
+  </doc>
+  <doc>
+    <field name="id">3</field>
+    <field name="content_type">parent</field>
+    <field name="title">New Lucene and Solr release</field>
+    <doc>
+      <field name="id">4</field>
+      <field name="content_type">child</field>
+      <field name="comments">Lots of new features</field>
+    </doc>
+  </doc>
+</add>
+----
+
+== Block Join Children Query Parser
+
+This parser wraps a query that matches some parent documents and returns the children of those documents.
+
+The syntax for this parser is: `q={!child of=<blockMask>}<someParents>`.
+
+* The inner subordinate query string (`someParents`) must be a query that will match some parent documents
+* The `of` parameter must be a query string to use as a <<#block-mask,Block Mask>> -- typically a query that matches the set of all possible parent documents
+
+The resulting query will match all documents which do _not_ match the `<blockMask>` query and are children (or descendants) of the documents matched by `<someParents>`.
+
+Using the example documents above, we can construct a query such as `q={!child of="content_type:parent"}title:lucene`.
+We only get one document in response:
+
+[source,xml]
+----
+<result name="response" numFound="1" start="0">
+  <doc>
+    <str name="id">4</str>
+    <arr name="content_type"><str>child</str></arr>
+    <str name="comments">Lots of new features</str>
+  </doc>
+</result>
+----
+
+[CAUTION]
+====
+The query for `someParents` *MUST* match a strict subset of the documents matched by the <<#block-mask,Block Mask>> or your query may result in an Error:
+
+[literal]
+Parent query must not match any docs besides parent filter.
+Combine them as must (+) and must-not (-) clauses to find a problem doc.
+
+You can search for `q=+(someParents) -(blockMask)` to find a cause if you encounter this type of error.
+====
+
+=== Filtering and Tagging
+
+`{!child}` also supports `filters` and `excludeTags` local params like the following:
+
+[source,text]
+?q={!child of=<blockMask> filters=$parentfq excludeTags=certain}<someParents>
+&parentfq=BRAND:Foo
+&parentfq=NAME:Bar
+&parentfq={!tag=certain}CATEGORY:Baz
+
+This is equivalent to:
+
+[source,text]
+q={!child of=<blockMask>}+<someParents> +BRAND:Foo +NAME:Bar
+
+Notice "$" syntax in `filters` for referencing queries; comma-separated tags `excludeTags` allows to exclude certain queries by tagging.
+Overall the idea is similar to <<faceting.adoc#tagging-and-excluding-filters, excluding fq in facets>>. Note, that filtering is applied to the subordinate clause (`<someParents>`), and the intersection result is joined to the children.
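+
+For instance, with the sample documents indexed above, the following request restricts the matched parents to those whose title mentions "lucene" before joining down to their children, returning the same child document (`id` 4) as the earlier example:
+
+[source,text]
+?q={!child of="content_type:parent" filters=$parentfq}content_type:parent
+&parentfq=title:lucene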
+
+=== All Children Syntax
+
+When the subordinate clause (`<someParents>`) is omitted, it's parsed as a _segmented_ and _cached_ filter for child documents.
+More precisely, `q={!child of=<blockMask>}` is equivalent to `q=\*:* -<blockMask>`.
+
+== Block Join Parent Query Parser
+
+This parser takes a query that matches child documents and returns their parents.
+
+The syntax for this parser is similar to the `child` parser: `q={!parent which=<blockMask>}<someChildren>`.
+
+* The inner subordinate query string (`someChildren`) must be a query that will match some child documents
+* The `which` parameter must be a query string to use as a <<#block-mask,Block Mask>> -- typically a query that matches the set of all possible parent documents
+
+The resulting query will match all documents which _do_ match the `<blockMask>` query and are parents (or ancestors) of the documents matched by `<someChildren>`.
+
+Again using the example documents above, we can construct a query such as `q={!parent which="content_type:parent"}comments:SolrCloud`.
+We get this document in response:
+
+[source,xml]
+----
+<result name="response" numFound="1" start="0">
+  <doc>
+    <str name="id">1</str>
+    <arr name="content_type"><str>parent</str></arr>
+    <arr name="title"><str>Solr has block join support</str></arr>
+  </doc>
+</result>
+----
+
+
+[CAUTION]
+====
+The query for `someChildren` *MUST NOT* match any documents matched by the <<#block-mask,Block Mask>> or your query may result in an Error:
+
+[literal]
+Child query must not match same docs with parent filter.
+Combine them as must clauses (+) to find a problem doc.
+
+You can search for `q=+(blockMask) +(someChildren)` to find a cause.
+====
+
+
+=== Filtering and Tagging
+
+The `{!parent}` query supports `filters` and `excludeTags` local params like the following:
+
+[source,text]
+?q={!parent which=<blockMask> filters=$childfq excludeTags=certain}<someChildren>
+&childfq=COLOR:Red
+&childfq=SIZE:XL
+&childfq={!tag=certain}PRINT:Hatched
+
+This is equivalent to:
+
+[source,text]
+q={!parent which=<blockMask>}+<someChildren> +COLOR:Red +SIZE:XL
+
+Notice the "$" syntax in `filters` for referencing queries.
+Comma-separated tags in `excludeTags` allow excluding certain queries by tagging.
+Overall the idea is similar to <<faceting.adoc#tagging-and-excluding-filters, excluding fq in facets>>.
+Note that filtering is applied to the subordinate clause (`<someChildren>`) first, and the intersection result is joined to the parents.
+
+=== Scoring with the Block Join Parent Query Parser
+
+You can optionally use the `score` local parameter to return scores of the subordinate query.
+The values to use for this parameter define the type of aggregation: `avg` (average), `max` (maximum), `min` (minimum), or `total` (sum).
+The implicit default is `none`, which returns `0.0`.
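+
+For example, extending the earlier query, the following returns the same parent document but scores it with the highest score among its matching children:
+
+[source,text]
+q={!parent which="content_type:parent" score=max}comments:SolrCloud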
+
+=== All Parents Syntax
+
+When the subordinate clause (`<someChildren>`) is omitted, it's parsed as a _segmented_ and _cached_ filter for all parent documents.
+More precisely, `q={!parent which=<blockMask>}` is equivalent to `q=<blockMask>`.
+
+[#block-mask]
+== Block Masks: The `of` and `which` local params
+
+The purpose of the "Block Mask" query specified as either an `of` or `which` param (depending on the parser used) is to identy the set of all documents in the index which should be treated as "parents" _(or their ancestors)_ and which documents should be treated as "children".
+This is important because in the "on disk" index, the relationships are flattened into "blocks" of documents, so the `of` / `which` params are needed to serve as a "mask" against the flat document blocks to identify the boundaries of every hierarchical relationship.
+
+In the example queries above, we were able to use a very simple Block Mask of `content_type:parent` because our data is very simple: every document is either a `parent` or a `child`.
+So this query string easily distinguishes _all_ of our documents.
+
+A common mistake is to try to use a `which` parameter that is more restrictive than the set of all parent documents, in order to filter the parents that are matched, as in this bad example:
+
+----
+// BAD! DO NOT USE!
+q={!parent which="title:join"}comments:support
+----
+
+This type of query will frequently not work the way you might expect.
+Since the `which` param only identifies _some_ of the "parent" documents, the resulting query can match "parent" documents it should not, because it will mistakenly identify all documents which do _not_ match the `which="title:join"` Block Mask as children of the next "parent" document in the index (that does match this Mask).
+
+A similar problematic situation can arise when mixing parent/child documents with "simple" documents that have no children _and do not match the query used to identify 'parent' documents_.
+For example, if we add the following document to our existing parent/child example documents:
+
+[source,xml]
+----
+<add>
+  <doc>
+    <field name="id">0</field>
+    <field name="content_type">plain</field>
+    <field name="title">Lucene and Solr are cool</field>
+  </doc>
+</add>
+----
+
+...then our simple `content_type:parent` Block Mask would no longer be adequate.
+We would instead need to use `\*:* -content_type:child` or `content_type:(plain parent)` to prevent our "simple" document from mistakenly being treated as a "child" of an adjacent "parent" document.
+
+The <<searching-nested-documents#searching-nested-documents,Searching Nested Documents>> section contains more detailed examples of specifying Block Mask queries with non-trivial hierarchies of documents.
diff --git a/solr/solr-ref-guide/src/caches-warming.adoc b/solr/solr-ref-guide/src/caches-warming.adoc
new file mode 100644
index 0000000..7013c18
--- /dev/null
+++ b/solr/solr-ref-guide/src/caches-warming.adoc
@@ -0,0 +1,326 @@
+= Caches and Query Warming
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+Solr's caches provide an essential way to improve query performance.
+Caches can store documents, filters used in queries, and results from previous queries.
+
+Caches are cleared after a <<commits-transaction-logs.adoc#commits,commit>> and usually need to be re-populated before their benefit can be seen again.
+To counteract this, caches can be "warmed" before a new searcher is considered open, by automatically populating the new cache with values from the old cache.
+
+Cache management is critical to a successful Solr implementation; expect to fine-tune cache settings as your application grows.
+
+== <query> in solrconfig.xml
+
+The settings in this section affect the way that Solr will process and respond to queries.
+
+These settings are all configured in child elements of the `<query>` element in `solrconfig.xml`.
+
+[source,xml]
+----
+<config>
+  <query>
+    ...
+  </query>
+</config>
+----
+
+== Caches
+
+Solr caches are associated with a specific instance of an Index Searcher, a specific view of an index that doesn't change during the lifetime of that searcher.
+As long as that Index Searcher is being used, any items in its cache will be valid and available for reuse.
+By default cached Solr objects do not expire after a time interval; instead, they remain valid for the lifetime of the Index Searcher.
+Idle time-based expiration can be enabled by using the `maxIdleTime` option.
+
+When a new searcher is opened, the current searcher continues servicing requests while the new one auto-warms its cache.
+The new searcher uses the current searcher's cache to pre-populate its own.
+When the new searcher is ready, it is registered as the current searcher and begins handling all new search requests.
+The old searcher will be closed once it has finished servicing all its requests.
+
+=== Cache Implementations
+
+Solr comes with a default `SolrCache` implementation that is used for different types of caches.
+
+The `CaffeineCache` is an implementation backed by the https://github.com/ben-manes/caffeine[Caffeine caching library].
+By default it uses a Window TinyLFU (W-TinyLFU) eviction policy, which allows eviction based on both frequency and recency of use in O(1) time with a small footprint.
+This cache generally offers a lower memory footprint, higher hit ratio, and better multi-threaded performance than legacy caches.
+
+`CaffeineCache` uses an auto-warm count that supports both integers and percentages which get evaluated relative to the current size of the cache when warming happens.
+
+The <<plugins-stats-screen.adoc#,Plugins/Stats>> page in the Solr Admin UI will display information about the performance of all the active caches.
+This information can help you fine-tune the sizes of the various caches appropriately for your particular application.
+When a Searcher terminates, a summary of its cache usage is also written to the log.
+
+=== Cache Parameters
+
+Each cache has settings to define its initial size (`initialSize`), maximum size (`size`), and the number of items to use during warming (`autowarmCount`).
+The `autowarmCount` can also be expressed as a percentage instead of an absolute value.
+
+A `maxIdleTime` attribute controls the automatic eviction of entries that haven't been used for a while.
+This attribute is expressed in seconds, with the default value of `0` meaning no entries are automatically evicted due to exceeded idle time.
+Smaller values of this attribute will cause older entries to be evicted quickly, which will reduce cache memory usage but may instead cause thrashing due to a repeating eviction-lookup-miss-insertion cycle of the same entries.
+Larger values will cause entries to stay around longer, waiting to be reused, at the cost of increased memory usage.
+Reasonable values, depending on the query volume and patterns, may lie somewhere between 60 and 3600 seconds.
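+
+For example, a filter cache whose idle entries are evicted after ten minutes might be configured like this (the other values are only illustrative):
+
+[source,xml]
+----
+<filterCache class="solr.CaffeineCache"
+             size="512"
+             initialSize="512"
+             autowarmCount="128"
+             maxIdleTime="600"/>
+----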
+
+The `maxRamMB` attribute limits the maximum amount of memory a cache may consume.
+When both `size` and `maxRamMB` limits are specified the `maxRamMB` limit will take precedence and the `size` limit will be ignored.
+
+All caches can be disabled using the parameter `enabled` with a value of `false`.
+Caches can also be disabled on a query-by-query basis with the `cache` parameter, as described in the section <<common-query-parameters.adoc#cache-local-parameter,cache Local Parameter>>.
+
+Details of each cache are described below.
+
+=== Filter Cache
+
+This cache holds parsed queries, each paired with an unordered set of all documents that match it.
+Unless such a set is trivially small, the set implementation is a bitset.
+
+The most typical way Solr uses the `filterCache` is to cache results of each `fq` search parameter, though there are some other cases as well.
+Subsequent queries using the same `fq` parameter result in cache hits and rapid returns of results.
+See <<common-query-parameters.adoc#fq-filter-query-parameter,fq (Filter Query) Parameter>> for a detailed discussion of `fq`.
+Use of this cache can be disabled for a `fq` using the <<common-query-parameters.adoc#cache-local-parameter,`cache` local parameter>>.
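+
+For example, an expensive one-off filter can skip this cache with a local parameter such as `fq={!cache=false}inStock:true` (the field here is only illustrative).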
+
+Another Solr feature using this cache is the `filter(...)` syntax in the default Lucene query parser.
+
+Solr also uses this cache for faceting when the configuration parameter `facet.method` is set to `fc`.
+For a discussion of faceting parameters, see <<faceting.adoc#field-value-faceting-parameters,Field-Value Faceting Parameters>>.
+
+[source,xml]
+----
+<filterCache class="solr.CaffeineCache"
+             size="512"
+             initialSize="512"
+             autowarmCount="128"/>
+----
+
+The cache supports a `maxRamMB` parameter which restricts the maximum amount of heap used by this cache.
+The `CaffeineCache` only supports evictions by either heap usage or size, but not both.
+Therefore, the `size` parameter is ignored if `maxRamMB` is specified.
+
+[source,xml]
+----
+<filterCache class="solr.CaffeineCache"
+             maxRamMB="1000"
+             autowarmCount="128"/>
+----
+
+=== Query Result Cache
+
+The `queryResultCache` holds the results of previous searches: ordered lists of document IDs (DocList) based on a query, a sort, and the range of documents requested.
+
+The `queryResultCache` has an optional setting to limit the maximum amount of RAM used (`maxRamMB`).
+This lets you specify the maximum heap size, in megabytes, used by the contents of this cache.
+When the cache grows beyond this size, oldest accessed queries will be evicted until the heap usage of the cache decreases below the specified limit.
+If a `size` is specified in addition to `maxRamMB` then only the heap usage limit is respected.
+
+Use of this cache can be disabled on a query-by-query basis in `q` using the <<common-query-parameters.adoc#cache-local-parameter,cache local parameter>>.
+
+[source,xml]
+----
+<queryResultCache class="solr.CaffeineCache"
+                  size="512"
+                  initialSize="512"
+                  autowarmCount="128"/>
+----
+
+=== Document Cache
+
+The `documentCache` holds Lucene Document objects (the stored fields for each document).
+Since Lucene internal document IDs are transient, this cache is not auto-warmed.
+
+The size for the `documentCache` should always be greater than `max_results` times the `max_concurrent_queries`, to ensure that Solr does not need to refetch a document during a request.
+The more fields you store in your documents, the higher the memory usage of this cache will be.
+
+[source,xml]
+----
+<documentCache class="solr.CaffeineCache"
+               size="512"
+               initialSize="512"
+               autowarmCount="0"/>
+----
+
+=== User Defined Caches
+
+You can also define named caches for your own application code to use.
+You can locate and use your cache object by name by calling the `SolrIndexSearcher` methods `getCache()`, `cacheLookup()` and `cacheInsert()`.
+
+[source,xml]
+----
+<cache name="myUserCache" class="solr.CaffeineCache"
+                          size="4096"
+                          initialSize="1024"
+                          autowarmCount="1024"
+                          regenerator="org.mycompany.mypackage.MyRegenerator" />
+----
+
+If you want auto-warming of your cache, include a `regenerator` attribute with the fully qualified name of a class that implements {solr-javadocs}/core/org/apache/solr/search/CacheRegenerator.html[`solr.search.CacheRegenerator`].
+You can also use the `NoOpRegenerator`, which simply repopulates the cache with old items.
+Define it with the `regenerator` parameter as `regenerator="solr.NoOpRegenerator"`.
+
+=== Monitoring Cache Sizes and Usage
+
+The section <<performance-statistics-reference.adoc#cache-statistics,Cache Statistics>> describes the metrics available for each cache.
+The metrics can be accessed in the <<plugins-stats-screen.adoc#,Solr Admin UI>> or in the <<metrics-reporting.adoc#metrics-api,Metrics API>>.
+
+The most important metrics to review when assessing caches are the size and the hit ratio.
+
+The size indicates how many items are in the cache.
+Some caches support setting the maximum cache size in MB of RAM.
+
+The hit ratio is a percentage of queries served by the cache, shown as a number between 0 and 1.
+Higher values indicate that the cache is being used often, while lower values would show that the cache isn't helping queries very much.
+Ideally, this number should be as close to 1 as possible.
+
+If you find that you have a low hit ratio but you've set your cache size high, you can optimize by reducing the cache size - there's no need to keep those objects in memory when they are not being used.
+
+Another useful metric is the cache evictions, which measures the objects removed from the cache.
+A high rate of evictions can indicate that your cache is too small and increasing it may show a higher hit ratio.
+Alternatively, if your hit ratio is high but your evictions are low, your cache might be too large and you may benefit from reducing the size.
+
+A low hit ratio is not always a sign of a specific cache problem.
+If your queries are not repeated often, a low hit ratio would be expected because it's less likely that cached objects will need to be reused.
+In these cases, a smaller cache size may be ideal for your system.
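+
+For example, cache metrics for a core can be fetched directly from the Metrics API with a request like the following (the host and metric prefix shown are only illustrative):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/metrics?group=core&prefix=CACHE.searcher.filterCache"
+----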
+
+== Query Sizing and Warming
+
+Several elements are available to control the size of queries and how caches are warmed.
+
+=== <maxBooleanClauses> Element
+
+Sets the maximum number of clauses allowed when parsing a boolean query string.
+
+This limit only impacts boolean queries specified by a user as part of a query string, and provides per-collection control over how complex user-specified boolean queries can be.
+Query strings that specify more clauses than this will result in an error.
+
+If this per-collection limit is greater than the <<configuring-solr-xml#global-maxbooleanclauses,global `maxBooleanClauses` limit>> specified in `solr.xml`, it will have no effect, as that setting also limits the size of user specified boolean queries.
+
+In default configurations this property uses the value of the `solr.max.booleanClauses` system property if specified.
+This is the same system property used in the <<configuring-solr-xml#global-maxbooleanclauses,global `maxBooleanClauses` setting>> in the default `solr.xml` making it easy for Solr administrators to increase both values (in all collections) without needing to search through and update the `solrconfig.xml` files in each collection.
+
+[source,xml]
+----
+<maxBooleanClauses>${solr.max.booleanClauses:1024}</maxBooleanClauses>
+----
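+
+For example, assuming the default configuration above, both the per-collection and global limits can be raised at once by setting the shared system property at startup, e.g., in `solr.in.sh` (the value is only illustrative):
+
+[source,bash]
+----
+SOLR_OPTS="$SOLR_OPTS -Dsolr.max.booleanClauses=2048"
+----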
+
+=== <enableLazyFieldLoading> Element
+
+When this parameter is set to `true`, fields that are not directly requested will be loaded only as needed.
+
+This can boost performance if the most common queries only need a small subset of fields, especially if infrequently accessed fields are large in size.
+
+[source,xml]
+----
+<enableLazyFieldLoading>true</enableLazyFieldLoading>
+----
+
+=== <useFilterForSortedQuery> Element
+
+This parameter configures Solr to use a filter to satisfy a search.
+If the requested sort does not include "score", the `filterCache` will be checked for a filter matching the query.
+For most situations, this is only useful if the same search is requested often with different sort options and none of them ever use "score".
+
+[source,xml]
+----
+<useFilterForSortedQuery>true</useFilterForSortedQuery>
+----
+
+=== <queryResultWindowSize> Element
+
+Used with the `queryResultCache`, this will cache a superset of the requested number of document IDs.
+
+For example, if a query requests documents 10 through 19, and `queryResultWindowSize` is 50, documents 0 through 49 will be cached.
+
+[source,xml]
+----
+<queryResultWindowSize>20</queryResultWindowSize>
+----
+
+=== <queryResultMaxDocsCached> Element
+
+This parameter sets the maximum number of documents to cache for any entry in the `queryResultCache`.
+
+[source,xml]
+----
+<queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+----
+
+=== <useColdSearcher> Element
+
+This setting controls whether search requests for which there is not a currently registered searcher should wait for a new searcher to warm up (`false`) or proceed immediately (`true`).
+When set to "false`, requests will block until the searcher has warmed its caches.
+
+[source,xml]
+----
+<useColdSearcher>false</useColdSearcher>
+----
+
+=== <maxWarmingSearchers> Element
+
+This parameter sets the maximum number of searchers that may be warming up in the background at any given time.
+Exceeding this limit will raise an error.
+
+For read-only followers, a value of `2` is reasonable.
+Leaders should probably be set a little higher.
+
+[source,xml]
+----
+<maxWarmingSearchers>2</maxWarmingSearchers>
+----
+
+== Query-Related Listeners
+
+As described in the section on <<Caches>>, new Searchers are cached.
+It's possible to use the triggers for listeners to perform query-related tasks.
+The most common use of this is to define queries to further "warm" the Searchers while they are starting.
+One benefit of this approach is that field caches are pre-populated for faster sorting.
+
+Good query selection is key with this type of listener.
+It's best to choose your most common and/or heaviest queries and include not just the keywords used, but any other parameters such as sorting or filtering requests.
+
+There are two types of events that can trigger a listener.
+
+. A `firstSearcher` event occurs when a new searcher is being prepared but there is no current registered searcher to handle requests or to gain auto-warming data from (i.e., on Solr startup).
+. A `newSearcher` event is fired whenever a new searcher is being prepared, such as after a commit, and there is a current searcher handling requests.
+
+The (commented out) examples below can be found in the `solrconfig.xml` file of the `sample_techproducts_configs` <<config-sets.adoc#,configset>> included with Solr, and demonstrate using the `solr.QuerySenderListener` class to warm a set of explicit queries:
+
+[source,xml]
+----
+<listener event="newSearcher" class="solr.QuerySenderListener">
+  <arr name="queries">
+  <!--
+    <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+    <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+   -->
+  </arr>
+</listener>
+
+<listener event="firstSearcher" class="solr.QuerySenderListener">
+  <arr name="queries">
+    <lst><str name="q">static firstSearcher warming in solrconfig.xml</str></lst>
+  </arr>
+</listener>
+----
+
+[IMPORTANT]
+====
+The above code comes from a _sample_ `solrconfig.xml`.
+
+A key best practice is to modify these defaults before taking your application to production, but please note: while the sample queries are commented out in the section for the "newSearcher", the sample query is not commented out for the "firstSearcher" event.
+
+There is no point in auto-warming your Searcher with the query string "static firstSearcher warming in solrconfig.xml" if that is not relevant to your search application.
+====
diff --git a/solr/solr-ref-guide/src/cert-authentication-plugin.adoc b/solr/solr-ref-guide/src/cert-authentication-plugin.adoc
index 292ee5c..1c968bd 100644
--- a/solr/solr-ref-guide/src/cert-authentication-plugin.adoc
+++ b/solr/solr-ref-guide/src/cert-authentication-plugin.adoc
@@ -16,11 +16,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr can support extracting the user principal out of the client's certificate with the use of the CertAuthPlugin.
+Solr can support extracting the user principal out of the client's certificate with the use of the `CertAuthPlugin`.
 
 == Enable Certificate Authentication
 
-For Certificate authentication, the `security.json` file must have an `authentication` part which defines the class being used for authentication.
+For certificate authentication, the `security.json` file must have an `authentication` part which defines the class being used for authentication.
 
 An example `security.json` is shown below:
 
@@ -50,7 +50,8 @@ Authorization plugins will need to accept and handle the full subject name, for
 CN=Solr User,OU=Engineering,O=Example Inc.,C=US
 ----
 
-A list of possible tags that can be present in the subject name is available in https://tools.ietf.org/html/rfc5280#section-4.1.2.4[RFC-5280, Section 4.1.2.4]. Values may have spaces, punctuation, and other characters.
+A list of possible tags that can be present in the subject name is available in https://tools.ietf.org/html/rfc5280#section-4.1.2.4[RFC-5280, Section 4.1.2.4].
+Values may have spaces, punctuation, and other characters.
 
 It is best practice to verify the actual contents of certificates issued by your trusted certificate authority before configuring authorization based on the contents.
 
diff --git a/solr/solr-ref-guide/src/charfilterfactories.adoc b/solr/solr-ref-guide/src/charfilterfactories.adoc
index 031706c..ae06264 100644
--- a/solr/solr-ref-guide/src/charfilterfactories.adoc
+++ b/solr/solr-ref-guide/src/charfilterfactories.adoc
@@ -18,7 +18,8 @@
 
 CharFilter is a component that pre-processes input characters.
 
-CharFilters can be chained like Token Filters and placed in front of a Tokenizer. CharFilters can add, change, or remove characters while preserving the original character offsets to support features like highlighting.
+CharFilters can be chained like Token Filters and placed in front of a Tokenizer.
+CharFilters can add, change, or remove characters while preserving the original character offsets to support features like highlighting.
 
 == solr.MappingCharFilterFactory
 
@@ -65,9 +66,7 @@ Mapping file syntax:
 * The source string must contain at least one character, but the target string may be empty.
 * The following character escape sequences are recognized within source and target strings:
 +
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
-+
-[cols="20,30,20,30",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Escape Sequence |Resulting Character (http://www.ecma-international.org/publications/standards/Ecma-048.htm[ECMA-48] alias) |Unicode Character |Example Mapping Line
 |`\\` |`\` |U+005C |`"\\" \=> "/"`
@@ -83,7 +82,8 @@ Mapping file syntax:
 
 == solr.HTMLStripCharFilterFactory
 
-This filter creates `org.apache.solr.analysis.HTMLStripCharFilter`. This CharFilter strips HTML from the input stream and passes the result to another CharFilter or a Tokenizer.
+This filter creates `org.apache.solr.analysis.HTMLStripCharFilter`.
+This CharFilter strips HTML from the input stream and passes the result to another CharFilter or a Tokenizer.
 
 This filter:
 
@@ -103,7 +103,9 @@ This filter:
 * Inline tags, such as `<b>`, `<i>`, or `<span>` will be removed.
 * Uppercase character entities like `quot`, `gt`, `lt` and `amp` are recognized and handled as lowercase.
 
-TIP: The input need not be an HTML document. The filter removes only constructs that look like HTML. If the input doesn't include anything that looks like HTML, the filter won't remove any input.
+TIP: The input need not be an HTML document.
+The filter removes only constructs that look like HTML.
+If the input doesn't include anything that looks like HTML, the filter won't remove any input.
 
 The table below presents examples of HTML stripping.
 
@@ -154,11 +156,16 @@ This filter performs pre-tokenization Unicode normalization using http://site.ic
 
 Arguments:
 
-`form`:: A http://unicode.org/reports/tr15/[Unicode Normalization Form], one of `nfc`, `nfkc`, `nfkc_cf`. Default is `nfkc_cf`.
+`form`:: A http://unicode.org/reports/tr15/[Unicode Normalization Form], one of `nfc`, `nfkc`, `nfkc_cf`.
+Default is `nfkc_cf`.
 
-`mode`:: Either `compose` or `decompose`. Default is `compose`. Use `decompose` with `name="nfc"` or `name="nfkc"` to get NFD or NFKD, respectively.
+`mode`:: Either `compose` or `decompose`.
+Default is `compose`.
+Use `decompose` with `name="nfc"` or `name="nfkc"` to get NFD or NFKD, respectively.
 
-`filter`:: A http://www.icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet] pattern. Codepoints outside the set are always left unchanged. Default is `[]` (the null set, no filtering - all codepoints are subject to normalization).
+`filter`:: A http://www.icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet] pattern.
+Codepoints outside the set are always left unchanged.
+Default is `[]` (the null set, no filtering - all codepoints are subject to normalization).
 
 Example:
 
@@ -234,9 +241,7 @@ You can configure this filter in `schema.xml` like this:
 
 The table below presents examples of regex-based pattern replacement:
 
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
-
-[cols="20,20,10,20,30",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Input |Pattern |Replacement |Output |Description
 |see-ing looking |`(\w+)(ing)` |`$1` |see-ing look |Removes "ing" from the end of word.
diff --git a/solr/solr-ref-guide/src/choosing-an-output-format.adoc b/solr/solr-ref-guide/src/choosing-an-output-format.adoc
deleted file mode 100644
index 1b9eb74..0000000
--- a/solr/solr-ref-guide/src/choosing-an-output-format.adoc
+++ /dev/null
@@ -1,23 +0,0 @@
-= Choosing an Output Format
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-Many programming environments are able to send HTTP requests and retrieve responses. Parsing the responses is a slightly more thorny problem. Fortunately, Solr makes it easy to choose an output format that will be easy to handle on the client side.
-
-Specify a response format using the `wt` parameter in a query. The available response formats are documented in <<response-writers.adoc#,Response Writers>>.
-
-Most client APIs hide this detail for you, so for many types of client applications, you won't ever have to specify a `wt` parameter. In JavaScript, however, the interface to Solr is a little closer to the metal, so you will need to add this parameter yourself.
diff --git a/solr/solr-ref-guide/src/client-api-lineup.adoc b/solr/solr-ref-guide/src/client-api-lineup.adoc
deleted file mode 100644
index edcbea4..0000000
--- a/solr/solr-ref-guide/src/client-api-lineup.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-= Other Clients
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-The Solr Wiki contains a list of client APIs at https://cwiki.apache.org/confluence/display/solr/IntegratingSolr.
diff --git a/solr/solr-ref-guide/src/client-apis.adoc b/solr/solr-ref-guide/src/client-apis.adoc
index 99d1cbb..f47d9a1 100644
--- a/solr/solr-ref-guide/src/client-apis.adoc
+++ b/solr/solr-ref-guide/src/client-apis.adoc
@@ -1,5 +1,8 @@
 = Client APIs
-:page-children: introduction-to-client-apis, choosing-an-output-format, using-solrj, using-javascript, using-python, using-solr-from-ruby, client-api-lineup 
+:page-children: solrj, \
+  javascript, \
+  python, \
+  ruby
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -17,18 +20,54 @@
 // specific language governing permissions and limitations
 // under the License.
 
-This section discusses the available client APIs for Solr. It covers the following topics:
+At its heart, Solr is a Web application, but because it is built on open protocols, any type of client application can use Solr.
 
-<<introduction-to-client-apis.adoc#,Introduction to Client APIs>>: A conceptual overview of Solr client APIs.
+Solr offers documentation on the following client integrations:
 
-<<choosing-an-output-format.adoc#,Choosing an Output Format>>: Information about choosing a response format in Solr.
+****
+// This tags the below list so it can be used in the parent page section list
+// tag::client-sections[]
+[width=100%,cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<solrj.adoc#,SolrJ>>: SolrJ, an API for working with Java applications.
+| <<javascript.adoc#,JavaScript>>: JavaScript clients.
+| <<python.adoc#,Python>>: Python and JSON responses.
+| <<ruby.adoc#,Ruby>>: Solr with Ruby applications.
+|===
+//end::client-sections[]
+****
 
-<<using-solrj.adoc#,Using SolrJ>>: Detailed information about SolrJ, an API for working with Java applications.
+The Solr Wiki contains a list of client APIs at https://cwiki.apache.org/confluence/display/solr/IntegratingSolr.
 
-<<using-javascript.adoc#,Using JavaScript>>: Explains why a client API is not needed for JavaScript responses.
+== Introduction to Client APIs
 
-<<using-python.adoc#,Using Python>>: Information about Python and JSON responses.
+HTTP is the fundamental protocol used between client applications and Solr.
+The client makes a request and Solr does some work and provides a response.
+Clients use requests to ask Solr to do things like perform queries or index documents.
 
-<<using-solr-from-ruby.adoc#,Using Solr From Ruby>>: Detailed information about using Solr with Ruby applications.
+Client applications can reach Solr by creating HTTP requests and parsing the HTTP responses.
+Client APIs encapsulate much of the work of sending requests and parsing responses, which makes it much easier to write client applications.
 
-<<client-api-lineup.adoc#,Other Clients>>: How to find links to 3rd-party client libraries.
+Clients use Solr's five fundamental operations to work with Solr.
+The operations are query, index, delete, commit, and optimize.
+
+Queries are executed by creating a URL that contains all the query parameters.
+Solr examines the request URL, performs the query, and returns the results.
+The other operations are similar, although in certain cases the HTTP request is a POST operation and contains information beyond whatever is included in the request URL.
+An index operation, for example, may contain a document in the body of the request.
+
+Solr also features an EmbeddedSolrServer that offers a Java API without requiring an HTTP connection.
+For details, see <<solrj.adoc#,SolrJ>>.
+
+
+== Choosing an Output Format
+
+Many programming environments are able to send HTTP requests and retrieve responses.
+Parsing the responses is a slightly more thorny problem.
+Fortunately, Solr makes it easy to choose an output format that will be easy to handle on the client side.
+
+Specify a response format using the `wt` parameter in a query.
+The available response formats are documented in <<response-writers.adoc#,Response Writers>>.
+
+Most client APIs hide this detail for you, so for many types of client applications, you won't ever have to specify a `wt` parameter.
+In JavaScript, however, the interface to Solr is a little closer to the metal, so you will need to add this parameter yourself.
diff --git a/solr/solr-ref-guide/src/cloud-screens.adoc b/solr/solr-ref-guide/src/cloud-screens.adoc
index 0563bfe..f8b256c 100644
--- a/solr/solr-ref-guide/src/cloud-screens.adoc
+++ b/solr/solr-ref-guide/src/cloud-screens.adoc
@@ -16,41 +16,49 @@
 // specific language governing permissions and limitations
 // under the License.
 
-When running in <<solrcloud.adoc#,SolrCloud>> mode, a "Cloud" option will appear in the Admin UI between <<logging.adoc#,Logging>> and <<collections-core-admin.adoc#,Collections>>.
-
-This screen provides status information about each collection & node in your cluster, as well as access to the low level data being stored in <<using-zookeeper-to-manage-configuration-files.adoc#,ZooKeeper>>.
+This screen provides status information about each collection and node in your cluster, as well as access to the low-level data being stored in <<zookeeper-file-management.adoc#,ZooKeeper files>>.
 
 .Only Visible When using SolrCloud
 [NOTE]
 ====
-The "Cloud" menu option is only available on Solr instances running in <<getting-started-with-solrcloud.adoc#,SolrCloud mode>>. Single node or leader/follower replication instances of Solr will not display this option.
+The "Cloud" menu option is only available when Solr is running <<cluster-types.adoc#solrcloud-mode,SolrCloud>>.
+User-managed clusters or single-node installations will not display this option.
 ====
 
-Click on the "Cloud" option in the left-hand navigation, and a small sub-menu appears with options called "Nodes", "Tree", "ZK Status" and "Graph". The sub-view selected by default is "Nodes".
+Click on the "Cloud" option in the left-hand navigation, and a small sub-menu appears with options called "Nodes", "Tree", "ZK Status" and "Graph".
+The sub-view selected by default is "Nodes".
 
 == Nodes View
 The "Nodes" view shows a list of the hosts and nodes in the cluster along with key information for each: "CPU", "Heap", "Disk usage", "Requests", "Collections" and "Replicas".
 
-The example below shows the default "cloud" example with some documents added to the "gettingstarted" collection. Details are expanded for node on port 7574, showing more metadata and more metrics details. The screen provides links to navigate to nodes, collections and replicas. The table supports paging and filtering on host/node names and collection names.
+The example below shows the default "cloud" example with some documents added to the "gettingstarted" collection.
+Details are expanded for the node on port 7574, showing more metadata and more metrics details.
+The screen provides links to navigate to nodes, collections and replicas.
+The table supports paging and filtering on host/node names and collection names.
 
 image::images/cloud-screens/cloud-nodes.png[image,width=900,height=415]
 
 == Tree View
-The "Tree" view shows a directory structure of the data in ZooKeeper, including cluster wide information regarding the `live_nodes` and `overseer` status, as well as collection specific information such as the `state.json`, current shard leaders, and configuration files in use. In this example, we see part of the `state.json`  definition for the "tlog" collection:
+The "Tree" view shows a directory structure of the data in ZooKeeper, including cluster wide information regarding the `live_nodes` and `overseer` status.
+Collection-specific information such as the `state.json`, current shard leaders, and configuration files in use are also available.
+
+In this example, we see part of the `state.json` definition for the "tlog" collection:
 
 image::images/cloud-screens/cloud-tree.png[image,width=487,height=250]
 
 As an aid to debugging, the data shown in the "Tree" view can be exported locally using the following command: `bin/solr zk ls -r /`
 
 == ZK Status View
-The "ZK Status" view gives an overview over the ZooKeeper servers or ensemble used by Solr. It lists whether running in `standalone` or `ensemble` mode, shows how many ZooKeeper nodes are configured, and then displays a table listing detailed monitoring status for each node, including who is the leader, configuration parameters, and more.
+The "ZK Status" view gives an overview over the ZooKeeper servers or ensemble used by Solr.
+It lists whether running in `standalone` or `ensemble` mode, shows how many ZooKeeper nodes are configured, and then displays a table listing detailed monitoring status for each node, including who is the leader, configuration parameters, and more.
 
 image::images/cloud-screens/cloud-zkstatus.png[image,width=512,height=509]
 
 == Graph View
-The "Graph" view shows a graph of each collection, the shards that make up those collections, and the addresses and type ("NRT", "TLOG" or "PULL") of each replica for each shard.
+The "Graph" view shows a graph of each collection, its shards, and the addresses and type ("NRT", "TLOG", or "PULL") of each replica for each shard.
 
-This example shows a simple cluster. In addition to the 2 shard, 2 replica "gettingstarted" collection, there is an additional "tlog" collection consisting of mixed TLOG and PULL replica types.
+This example shows a simple cluster.
+In addition to the 2 shard, 2 replica "gettingstarted" collection, there is an additional "tlog" collection consisting of mixed TLOG and PULL replica types.
 
 image::images/cloud-screens/cloud-graph.png[image,width=512,height=250]
 
diff --git a/solr/solr-ref-guide/src/cluster-node-management.adoc b/solr/solr-ref-guide/src/cluster-node-management.adoc
index eb0ad53..2f37b5a 100644
--- a/solr/solr-ref-guide/src/cluster-node-management.adoc
+++ b/solr/solr-ref-guide/src/cluster-node-management.adoc
@@ -27,7 +27,8 @@ These API commands work with a SolrCloud cluster at the entire cluster level, or
 Fetch the cluster status including collections, shards, replicas, configuration name as well as collection aliases and cluster properties.
 
 Additionally, this command reports a `health` status of each collection and shard, in
-order to make it easier to monitor the operational state of the collections. The
+order to make it easier to monitor the operational state of the collections.
+The following health state values are defined, ordered from best to worst, based on
 the percentage of active replicas (`active`):
 
@@ -40,9 +41,7 @@ the percentage of active replicas (`active`):
 `RED`::
 No active replicas *OR* there's no shard leader.
 
-The collection health state is reported as the worst state of any shard, e.g., for a
-collection with all shards GREEN except for one YELLOW the collection health will be
-reported as YELLOW.
+The collection health state is reported as the worst state of any shard, e.g., for a collection with all shards GREEN except for one YELLOW, the collection health will be reported as YELLOW.
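+
+As a quick way to keep an eye on these health values, the full status response can be filtered on the command line.
+This is only a sketch: it assumes the `jq` utility is available and that each collection's status is nested under `cluster.collections` in the JSON response.
+
+[source,bash]
+----
+# Print the health value reported for each collection (illustrative only)
+curl -s "http://localhost:8983/solr/admin/collections?action=CLUSTERSTATUS" \
+  | jq '.cluster.collections | map_values(.health)'
+----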
 
 [.dynamic-tabs]
 --
@@ -69,10 +68,13 @@ We do not currently have a V2 equivalent.
 === CLUSTERSTATUS Parameters
 
 `collection`::
-The collection or alias name for which information is requested. If omitted, information on all collections in the cluster will be returned. If an alias is supplied, information on the collections in the alias will be returned.
+The collection or alias name for which information is requested.
+If omitted, information on all collections in the cluster will be returned.
+If an alias is supplied, information on the collections in the alias will be returned.
 
 `shard`::
-The shard(s) for which information is requested. Multiple shard names can be specified as a comma-separated list.
+The shard(s) for which information is requested.
+Multiple shard names can be specified as a comma-separated list.
 
 `\_route_`::
+This can be used if you need the details of the shard a particular document belongs to and you don't know which shard it falls under.
@@ -201,18 +203,21 @@ curl -X POST http://localhost:8983/api/cluster -H 'Content-Type: application/jso
 === CLUSTERPROP Parameters
 
 `name`::
-The name of the property. Supported properties names are `location`, `maxCoresPerNode`, `urlScheme`, and `defaultShardPreferences`.
-If the <<solr-tracing.adoc#,Jaeger tracing contrib>> has been enabled, the property `samplePercentage` is also available.
+The name of the property.
+Supported property names are `location`, `maxCoresPerNode`, `urlScheme`, and `defaultShardPreferences`.
+If the <<distributed-tracing.adoc#,Jaeger tracing contrib>> has been enabled, the property `samplePercentage` is also available.
 +
 Other properties can be set (for example, if you need them for custom plugins) but they must begin with the prefix `ext.`.
 Unknown properties that don't begin with `ext.` will be rejected.
 
 `val`::
-The value of the property. If the value is empty or null, the property is unset.
+The value of the property.
+If the value is empty or null, the property is unset.
 
 === CLUSTERPROP Response
 
-The response will include the status of the request and the properties that were updated or removed. If the status is anything other than "0", an error message will explain why the request failed.
+The response will include the status of the request and the properties that were updated or removed.
+If the status is anything other than "0", an error message will explain why the request failed.
 
 === Examples using CLUSTERPROP
 
@@ -301,13 +306,15 @@ curl -X POST -H 'Content-type:application/json' --data-binary '
 ----
 
 NOTE: Until Solr 7.5, cluster properties supported a `collectionDefaults` key which is now deprecated and
-replaced with `defaults`. Using the `collectionDefaults` parameter in Solr 7.4 or 7.5 will continue to work
+replaced with `defaults`.
+Using the `collectionDefaults` parameter in Solr 7.4 or 7.5 will continue to work
  but the format of the properties will automatically be converted to the new nested structure.
 Support for the "collectionDefaults" key will be removed in Solr 9.
 
 === Default Shard Preferences
 
-Using the `defaultShardPreferences` parameter, you can implement rack or availability zone awareness. First, make sure to "label" your nodes using a <<configuring-solrconfig-xml.adoc#jvm-system-properties,system property>> (e.g., `-Drack=rack1`). Then, set the value of `defaultShardPreferences` to `node.sysprop:sysprop.YOUR_PROPERTY_NAME` like this:
+Using the `defaultShardPreferences` parameter, you can implement rack or availability zone awareness.
+First, make sure to "label" your nodes using a <<property-substitution.adoc#jvm-system-properties,system property>> (e.g., `-Drack=rack1`).
+Then, set the value of `defaultShardPreferences` to `node.sysprop:sysprop.YOUR_PROPERTY_NAME` like this:
 
 [source,bash]
 ----
@@ -325,7 +332,9 @@ At this point, if you run a query on a node having e.g., `rack=rack1`, Solr will
 [[balanceshardunique]]
 == BALANCESHARDUNIQUE: Balance a Property Across Nodes
 
-Insures that a particular property is distributed evenly amongst the physical nodes that make up a collection. If the property already exists on a replica, every effort is made to leave it there. If the property is *not* on any replica on a shard, one is chosen and the property is added.
+Ensures that a particular property is distributed evenly amongst the physical nodes that make up a collection.
+If the property already exists on a replica, every effort is made to leave it there.
+If the property is *not* on any replica on a shard, one is chosen and the property is added.
 
 [.dynamic-tabs]
 --
@@ -360,20 +369,28 @@ curl -X POST http://localhost:8983/api/collections/techproducts -H 'Content-Type
 === BALANCESHARDUNIQUE Parameters
 
 `collection`::
-The name of the collection to balance the property in. This parameter is required.
+The name of the collection to balance the property in.
+This parameter is required.
 
 `property`::
-The property to balance. The literal `property.` is prepended to this property if not specified explicitly. This parameter is required.
+The property to balance.
+The literal `property.` is prepended to this property if not specified explicitly.
+This parameter is required.
 
 `onlyactivenodes`::
-Defaults to `true`. Normally, the property is instantiated on active nodes only. If this parameter is specified as `false`, then inactive nodes are also included for distribution.
+Defaults to `true`.
+Normally, the property is instantiated on active nodes only.
+If this parameter is specified as `false`, then inactive nodes are also included for distribution.
 
 `shardUnique`::
-Something of a safety valve. There is one pre-defined property (`preferredLeader`) that defaults this value to `true`. For all other properties that are balanced, this must be set to `true` or an error message will be returned.
+Something of a safety valve.
+There is one pre-defined property (`preferredLeader`) for which this value defaults to `true`.
+For all other properties that are balanced, this must be set to `true` or an error message will be returned.
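+
+For instance, balancing the `preferredLeader` property across the nodes hosting a hypothetical `techproducts` collection might look like this:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=BALANCESHARDUNIQUE&collection=techproducts&property=preferredLeader"
+----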
 
 === BALANCESHARDUNIQUE Response
 
-The response will include the status of the request. If the status is anything other than "0", an error message will explain why the request failed.
+The response will include the status of the request.
+If the status is anything other than "0", an error message will explain why the request failed.
 
 === Examples using BALANCESHARDUNIQUE
 
@@ -434,29 +451,36 @@ We do not currently have a V2 equivalent.
 === REPLACENODE Parameters
 
 `sourceNode`::
-The source node from which the replicas need to be copied from. This parameter is required.
+The source node from which the replicas need to be copied.
+This parameter is required.
 
 `targetNode`::
-The target node where replicas will be copied. If this parameter is not provided, Solr will identify nodes automatically based on policies or number of cores in each node.
+The target node where replicas will be copied.
+If this parameter is not provided, Solr will identify nodes automatically based on policies or number of cores in each node.
 
 `parallel`::
-If this flag is set to `true`, all replicas are created in separate threads. Keep in mind that this can lead to very high network and disk I/O if the replicas have very large indices. The default is `false`.
+If this flag is set to `true`, all replicas are created in separate threads.
+Keep in mind that this can lead to very high network and disk I/O if the replicas have very large indices.
+The default is `false`.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
 
 `timeout`::
-Time in seconds to wait until new replicas are created, and until leader replicas are fully recovered. The default is `300`, or 5 minutes.
+Time in seconds to wait until new replicas are created, and until leader replicas are fully recovered.
+The default is `300`, or 5 minutes.
 
 [IMPORTANT]
 ====
-This operation does not hold necessary locks on the replicas that belong to on the source node. So don't perform other collection operations in this period.
+This operation does not hold the necessary locks on the replicas that belong to the source node.
+So do not perform other collection operations during this period.
 ====
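+
+As an illustrative sketch (node names here are examples only), moving all replicas off one node and onto another might look like this:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=REPLACENODE&sourceNode=localhost:8983_solr&targetNode=localhost:8984_solr&async=replacenode-1"
+----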
 
 [[deletenode]]
 == DELETENODE: Delete Replicas in a Node
 
-Deletes all replicas of all collections in that node. Please note that the node itself will remain as a live node after this operation.
+Deletes all replicas of all collections on that node.
+Please note that the node itself will remain as a live node after this operation.
 [.dynamic-tabs]
 --
 [example.tab-pane#v1deletenode]
@@ -482,7 +506,8 @@ We do not currently have a V2 equivalent.
 === DELETENODE Parameters
 
 `node`::
-The node to be removed. This parameter is required.
+The node to be removed.
+This parameter is required.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
@@ -490,11 +515,14 @@ Request ID to track this action which will be <<collections-api.adoc#asynchronou
 [[addrole]]
 == ADDROLE: Add a Role
 
+Assigns a role to a given node in the cluster.
+The only supported role is `overseer`.
 
-
-Assigns a role to a given node in the cluster. The only supported role is `overseer`.
-
-Use this command to dedicate a particular node as Overseer. Invoke it multiple times to add more nodes. This is useful in large clusters where an Overseer is likely to get overloaded. If available, one among the list of nodes which are assigned the 'overseer' role would become the overseer. The system would assign the role to any other node if none of the designated nodes are up and running.
+Use this command to dedicate a particular node as Overseer.
+Invoke it multiple times to add more nodes.
+This is useful in large clusters where an Overseer is likely to get overloaded.
+If available, one of the nodes assigned the 'overseer' role will become the Overseer.
+The system will assign the role to any other node if none of the designated nodes are up and running.
 
 [.dynamic-tabs]
 --
@@ -530,14 +558,19 @@ curl -X POST http://localhost:8983/api/cluster -H 'Content-Type: application/jso
 === ADDROLE Parameters
 
 `role`::
-The name of the role. The only supported role as of now is `overseer`. This parameter is required.
+The name of the role.
+The only supported role as of now is `overseer`.
+This parameter is required.
 
 `node`::
-The name of the node that will be assigned the role. It is possible to assign a role even before that node is started. This parameter is started.
+The name of the node that will be assigned the role.
+It is possible to assign a role even before that node is started.
+This parameter is required.
 
 === ADDROLE Response
 
-The response will include the status of the request and the properties that were updated or removed. If the status is anything other than "0", an error message will explain why the request failed.
+The response will include the status of the request and the properties that were updated or removed.
+If the status is anything other than "0", an error message will explain why the request failed.
 
 === Examples using ADDROLE
 
@@ -563,7 +596,8 @@ http://localhost:8983/solr/admin/collections?action=ADDROLE&role=overseer&node=1
 [[removerole]]
 == REMOVEROLE: Remove Role
 
-Remove an assigned role. This API is used to undo the roles assigned using ADDROLE operation
+Remove an assigned role.
+This API is used to undo the roles assigned using the ADDROLE operation.
 
 [.dynamic-tabs]
 --
@@ -599,7 +633,9 @@ curl -X POST http://localhost:8983/api/cluster -H 'Content-Type: application/jso
 === REMOVEROLE Parameters
 
 `role`::
-The name of the role. The only supported role as of now is `overseer`. This parameter is required.
+The name of the role.
+The only supported role as of now is `overseer`.
+This parameter is required.
 
 `node`::
 The name of the node where the role should be removed.
@@ -607,7 +643,8 @@ The name of the node where the role should be removed.
 
 === REMOVEROLE Response
 
-The response will include the status of the request and the properties that were updated or removed. If the status is anything other than "0", an error message will explain why the request failed.
+The response will include the status of the request and the properties that were updated or removed.
+If the status is anything other than "0", an error message will explain why the request failed.
 
 === Examples using REMOVEROLE
 
diff --git a/solr/solr-ref-guide/src/cluster-types.adoc b/solr/solr-ref-guide/src/cluster-types.adoc
new file mode 100644
index 0000000..edde3f5
--- /dev/null
+++ b/solr/solr-ref-guide/src/cluster-types.adoc
@@ -0,0 +1,111 @@
+= Solr Cluster Types
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+A Solr cluster is a group of servers (_nodes_) that each run Solr.
+
+There are two general modes of operating a cluster of Solr nodes.
+One mode provides central coordination of the Solr nodes (<<SolrCloud Mode>>), while the other allows you to operate a cluster without this central coordination (<<User-Managed Mode>>).
+
+Both modes share general concepts, but ultimately differ in how those concepts are reflected in functionality and features.
+
+First let's cover a few general concepts and then outline the differences between the two modes.
+
+== Cluster Concepts
+
+=== Shards
+
+In both cluster modes, a single logical index can be split across nodes as _shards_.
+Each shard contains a subset of the overall index.
+
+The number of shards dictates the theoretical limit on the number of documents that can be indexed into Solr.
+It also determines the amount of parallelization possible for an individual search request.
+
+=== Replicas
+
+In order to provide some failover, each shard can be copied as a _replica_.
+A replica has the same configuration as the shard and any other replicas for the same index.
+
+It's possible to have replicas without having created shards.
+In this case, each replica would be a full copy of the entire index rather than a copy of only part of it.
+
+The number of replicas determines the level of fault tolerance the entire cluster has in the event of a node failure.
+It also dictates the theoretical limit on the number of concurrent search requests that can be processed under heavy load.
+
+=== Leaders
+
+Once replicas have been created, a _leader_ must be identified.
+The responsibility of the leader is to be a source-of-truth for each replica.
+When updates are made to the index, they are first processed by the leader and then by each replica (the exact mechanism for how this happens varies).
+
+The replicas which are not leaders are _followers_.
+
+=== Cores
+
+Each replica, whether it is a leader or a follower, is called a _core_.
+Multiple cores can be hosted on any one node.
+
+== SolrCloud Mode
+
+SolrCloud mode (also called "SolrCloud") uses Apache ZooKeeper to provide the centralized cluster management that is its main feature.
+ZooKeeper tracks each node of the cluster and the state of each core on each node.
+
+In this mode, configuration files are stored in ZooKeeper and not on the file system of each node.
+When configuration changes are made, they must be uploaded to ZooKeeper, which in turn makes sure each node knows changes have been made.
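+
+For example, an updated configset is typically pushed to ZooKeeper with the `bin/solr zk upconfig` command; the configset name, local directory, and ZooKeeper address below are illustrative:
+
+[source,bash]
+----
+bin/solr zk upconfig -n techproducts_conf -d /path/to/my/configset/conf -z localhost:9983
+----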
+
+SolrCloud introduces an additional concept, a _collection_.
+A collection is the entire group of cores that represent an index: the logical shards and the physical replicas for each shard.
+All cores in a collection share the same configuration (schema, `solrconfig.xml`, etc.).
+This is an additional centralization of the cluster management, as operations can be performed on the entire collection at one time.
+
+When changes are made to configurations, a single command to reload the collection would automatically reload each individual core that is a member of the collection.
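+
+For example, reloading every core of a hypothetical `techproducts` collection is a single request:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=RELOAD&name=techproducts"
+----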
+
+Sharding is handled automatically, simply by telling Solr during collection creation how many shards you'd like the collection to have.
+Index updates are then generally balanced between each shard automatically.
+Some degree of control over what documents are stored in which shards is also available, if needed.
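+
+As a sketch, a collection with two shards, each with two replicas, could be created with a single request (the collection name is illustrative):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=CREATE&name=techproducts&numShards=2&replicationFactor=2"
+----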
+
+ZooKeeper also handles load balancing and failover.
+Incoming requests, either to index documents or for user queries, can be sent to any node of the cluster, and Solr uses the cluster state stored in ZooKeeper to route the request to an appropriate replica of each shard.
+
+In SolrCloud, the leader is flexible, with built-in mechanisms for automatic leader election in case of failure in the leader.
+This means another core can become the leader, and from that point forward it is the source-of-truth for all replicas.
+
+As long as one replica of each relevant shard is available, a user query or indexing request can still be satisfied when running in SolrCloud mode.
+
+== User-Managed Mode
+
+In user-managed mode, the cluster coordination activities that SolrCloud delegates to ZooKeeper must be performed manually or with local scripts.
+
+If the corpus of documents is too large for a single-sharded index, the logic to create shards is entirely left to the user.
+There are no automated or programmatic ways for Solr to create shards during indexing.
+
+Routing documents to shards is handled manually, either with a simple hashing scheme (as sketched below) or a round-robin list of shards that sends each document to a different shard.
+Document updates must be sent to the right shard or duplicate documents could result.
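+
+A minimal sketch of such a hashing scheme, assuming one Solr core per shard with hypothetical core names `shard0`, `shard1`, and `shard2`:
+
+[source,bash]
+----
+# Pick a shard for a document by hashing its id (illustrative only)
+DOC_ID="SP2514N"
+NUM_SHARDS=3
+HASH=$(printf '%s' "$DOC_ID" | md5sum | cut -c1-8)
+SHARD=$(( 0x$HASH % NUM_SHARDS ))
+
+# Send the document to the core hosting that shard (core names are hypothetical)
+curl "http://localhost:8983/solr/shard${SHARD}/update?commit=true" \
+  -H 'Content-Type: application/json' \
+  -d "[{\"id\": \"$DOC_ID\", \"name\": \"Example document\"}]"
+----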
+
+In user-managed mode, the concept of leader and follower becomes critical.
+Identifying which node will host the leader replica and which host(s) will be replicas dictates how each node is configured.
+In this mode, all index updates are sent to the leader only.
+Once the leader has completed indexing, the replica will request the index updates and copy them from the leader.
+
+Load balancing is achieved with an external tool or process, unless request traffic can be managed by the leader or one of its replicas alone.
+
+If the leader goes down, there is no built-in failover mechanism.
+A replica could continue to serve queries if the queries were specifically directed to it.
+Changing a replica to serve as the leader would require changing `solrconfig.xml` configurations on all replicas and reloading each core.
+
+User-managed mode has no concept of a collection, so for all intents and purposes each Solr node is distinct from other nodes.
+Only some configuration parameters keep each node from behaving as a completely independent entity.
diff --git a/solr/solr-ref-guide/src/collapse-and-expand-results.adoc b/solr/solr-ref-guide/src/collapse-and-expand-results.adoc
index 6f32487..ce39c1c 100644
--- a/solr/solr-ref-guide/src/collapse-and-expand-results.adoc
+++ b/solr/solr-ref-guide/src/collapse-and-expand-results.adoc
@@ -22,7 +22,7 @@ The Collapsing query parser groups documents (collapsing the result set) accordi
 
 [IMPORTANT]
 ====
-In order to use these features with SolrCloud, the documents must be located on the same shard. To ensure document co-location, you can define the `router.name` parameter as `compositeId` when creating the collection. For more information on this option, see the section <<shards-and-indexing-data-in-solrcloud.adoc#document-routing,Document Routing>>.
+In order to use these features with SolrCloud, the documents must be located on the same shard. To ensure document co-location, you can define the `router.name` parameter as `compositeId` when creating the collection. For more information on this option, see the section <<solrcloud-shards-indexing.adoc#document-routing,Document Routing>>.
 ====
 
 == Collapsing Query Parser
@@ -33,7 +33,7 @@ The CollapsingQParserPlugin fully supports the QueryElevationComponent.
 
 === Collapsing Query Parser Options
 
-The CollapsingQParser accepts the following local parameters:
+The CollapsingQParser accepts the following local params:
 
 `field`::
 The field that is being collapsed on. The field must be a single-valued String, Int, or Float field.
@@ -73,7 +73,6 @@ The `hint=top_fc` hint is only available when collapsing on String fields. `top_
 +
 The default is none.
 
-
 `size`::
 Sets the initial size of the collapse data structures when collapsing on a *numeric field only*.
 +
diff --git a/solr/solr-ref-guide/src/collection-management.adoc b/solr/solr-ref-guide/src/collection-management.adoc
index 22c1f4c..788e0d5 100644
--- a/solr/solr-ref-guide/src/collection-management.adoc
+++ b/solr/solr-ref-guide/src/collection-management.adoc
@@ -70,41 +70,59 @@ curl -X POST http://localhost:8983/api/collections -H 'Content-Type: application
 The CREATE action allows the following parameters:
 
 `name`::
-The name of the collection to be created. This parameter is required.
+The name of the collection to be created.
+This parameter is required.
 
 `router.name`::
-The router name that will be used. The router defines how documents will be distributed among the shards. Possible values are `implicit` or `compositeId`, which is the default.
+The router name that will be used.
+The router defines how documents will be distributed among the shards.
+Possible values are `implicit` or `compositeId`, which is the default.
 +
-The `implicit` router does not automatically route documents to different shards. Whichever shard you indicate on the indexing request (or within each document) will be used as the destination for those documents.
+The `implicit` router does not automatically route documents to different shards.
+Whichever shard you indicate on the indexing request (or within each document) will be used as the destination for those documents.
 +
 The `compositeId` router hashes the value in the uniqueKey field and looks up that hash in the collection's clusterstate to determine which shard will receive the document, with the additional ability to manually direct the routing.
 +
-When using the `implicit` router, the `shards` parameter is required. When using the `compositeId` router, the `numShards` parameter is required.
+When using the `implicit` router, the `shards` parameter is required.
+When using the `compositeId` router, the `numShards` parameter is required.
 +
-For more information, see also the section <<shards-and-indexing-data-in-solrcloud.adoc#document-routing,Document Routing>>.
+For more information, see also the section <<solrcloud-shards-indexing.adoc#document-routing,Document Routing>>.
 
 `numShards`::
-The number of shards to be created as part of the collection. This is a required parameter when the `router.name` is `compositeId`.
+The number of shards to be created as part of the collection.
+This is a required parameter when the `router.name` is `compositeId`.
 
 `shards`::
-A comma separated list of shard names, e.g., `shard-x,shard-y,shard-z`. This is a required parameter when the `router.name` is `implicit`.
+A comma separated list of shard names, e.g., `shard-x,shard-y,shard-z`.
+This is a required parameter when the `router.name` is `implicit`.
 
 `replicationFactor`::
-The number of replicas to be created for each shard. The default is `1`.
+The number of replicas to be created for each shard.
+The default is `1`.
 +
-This will create a NRT type of replica. If you want another type of replica, see the `tlogReplicas` and `pullReplica` parameters below. See the section <<shards-and-indexing-data-in-solrcloud.adoc#types-of-replicas,Types of Replicas>> for more information about replica types.
+This will create a NRT type of replica.
+If you want another type of replica, see the `tlogReplicas` and `pullReplica` parameters below.
+See the section <<solrcloud-shards-indexing.adoc#types-of-replicas,Types of Replicas>> for more information about replica types.
 
 `nrtReplicas`::
-The number of NRT (Near-Real-Time) replicas to create for this collection. This type of replica maintains a transaction log and updates its index locally. If you want all of your replicas to be of this type, you can simply use `replicationFactor` instead.
+The number of NRT (Near-Real-Time) replicas to create for this collection.
+This type of replica maintains a transaction log and updates its index locally.
+If you want all of your replicas to be of this type, you can simply use `replicationFactor` instead.
 
 `tlogReplicas`::
-The number of TLOG replicas to create for this collection. This type of replica maintains a transaction log but only updates its index via replication from a leader. See the section <<shards-and-indexing-data-in-solrcloud.adoc#types-of-replicas,Types of Replicas>> for more information about replica types.
+The number of TLOG replicas to create for this collection.
+This type of replica maintains a transaction log but only updates its index via replication from a leader.
+See the section <<solrcloud-shards-indexing.adoc#types-of-replicas,Types of Replicas>> for more information about replica types.
 
 `pullReplicas`::
-The number of PULL replicas to create for this collection. This type of replica does not maintain a transaction log and only updates its index via replication from a leader. This type is not eligible to become a leader and should not be the only type of replicas in the collection. See the section <<shards-and-indexing-data-in-solrcloud.adoc#types-of-replicas,Types of Replicas>> for more information about replica types.
+The number of PULL replicas to create for this collection.
+This type of replica does not maintain a transaction log and only updates its index via replication from a leader.
+This type is not eligible to become a leader and should not be the only type of replicas in the collection.
+See the section <<solrcloud-shards-indexing.adoc#types-of-replicas,Types of Replicas>> for more information about replica types.
 
 `createNodeSet` (v1), `nodeSet` (v2)::
-Allows defining the nodes to spread the new collection across. The format is a comma-separated list of node_names, such as `localhost:8983_solr,localhost:8984_solr,localhost:8985_solr`.
+Allows defining the nodes to spread the new collection across.
+The format is a comma-separated list of node_names, such as `localhost:8983_solr,localhost:8984_solr,localhost:8985_solr`.
 +
 If not provided, the CREATE operation will create shard-replicas spread across all live Solr nodes.
 +
@@ -113,7 +131,8 @@ Alternatively, use the special value of `EMPTY` to initially create no shard-rep
 `createNodeSet.shuffle` (v1), `shuffleNodes` (v2)::
 Controls whether or not the shard-replicas created for this collection will be assigned to the nodes specified by the `createNodeSet` in a sequential manner, or if the list of nodes should be shuffled prior to creating individual replicas.
 +
-A `false` value makes the results of a collection creation predictable and gives more exact control over the location of the individual shard-replicas, but `true` can be a better choice for ensuring replicas are distributed evenly across nodes. The default is `true`.
+A `false` value makes the results of a collection creation predictable and gives more exact control over the location of the individual shard-replicas, but `true` can be a better choice for ensuring replicas are distributed evenly across nodes.
+The default is `true`.
 +
 This parameter is ignored if `createNodeSet` is not also specified.
 
@@ -124,7 +143,8 @@ If not provided, Solr will use the configuration of `_default` configset to crea
 When such a collection is deleted, its autocreated configset will be deleted by default when it is not in use by any other collection.
 
 `router.field` (v1), `router` (v2)::
-If this parameter is specified, the router will look at the value of the field in an input document to compute the hash and identify a shard instead of looking at the `uniqueKey` field. If the field specified is null in the document, the document will be rejected.
+If this parameter is specified, the router will look at the value of the field in an input document to compute the hash and identify a shard instead of looking at the `uniqueKey` field.
+If the field specified is null in the document, the document will be rejected.
 +
 Please note that <<realtime-get.adoc#,RealTime Get>> or retrieval by document ID would also require the parameter `\_route_` (or `shard.keys`) to avoid a distributed search.
 
@@ -132,37 +152,43 @@ Please note that <<realtime-get.adoc#,RealTime Get>> or retrieval by document ID
 If `true`, the states of individual replicas will be maintained as individual children of `state.json`. The default is `false`.
 
 `property._name_=_value_`::
-Set core property _name_ to _value_. See the section <<defining-core-properties.adoc#,Defining core.properties>> for details on supported properties and values.
+Set core property _name_ to _value_.
+See the section <<core-discovery.adoc#,Core Discovery>> for details on supported properties and values.
 
 [WARNING]
 ====
-The entries in each core.properties file are vital for Solr to function correctly. Overriding entries can result in unusable collections. Altering these entries by specifying `property._name_=_value_` is an expert-level option and should only be used if you have a thorough understanding of the consequences.
+The entries in each core.properties file are vital for Solr to function correctly.
+Overriding entries can result in unusable collections.
+Altering these entries by specifying `property._name_=_value_` is an expert-level option and should only be used if you have a thorough understanding of the consequences.
 ====
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
 
 `waitForFinalState`::
-If `true`, the request will complete only when all affected replicas become active. The default is `false`, which means that the API will return the status of the single action, which may be before the new replica is online and active.
+If `true`, the request will complete only when all affected replicas become active.
+The default is `false`, which means that the API will return the status of the single action, which may be before the new replica is online and active.
 
 `alias`::
 Starting with version 8.1, when a collection is created an alias can additionally be created
-that points to this collection. This parameter allows specifying the name of this alias, effectively combining
-this operation with <<collection-aliasing.adoc#createalias,CREATEALIAS>>
+that points to this collection.
+This parameter allows specifying the name of this alias, effectively combining
+this operation with <<alias-management.adoc#createalias,CREATEALIAS>>.
 
 Collections are first created in read-write mode but can be put in `readOnly`
 mode using the <<collection-management.adoc#modifycollection,MODIFYCOLLECTION>> action.
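+
+As an additional illustration of the routing parameters above, a collection using the `implicit` router with explicitly named shards might be created like this (the collection and shard names are examples):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=CREATE&name=eventlogs&router.name=implicit&shards=shard-x,shard-y,shard-z&replicationFactor=1"
+----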
 
 === CREATE Response
 
-The response will include the status of the request and the new core names. If the status is anything other than "success", an error message will explain why the request failed.
+The response will include the status of the request and the new core names.
+If the status is anything other than "success", an error message will explain why the request failed.
 
 [[reload]]
 == RELOAD: Reload a Collection
 
 The RELOAD action is used when you have changed a configuration file in ZooKeeper, like uploading a new `schema.xml`.
 Solr automatically reloads collections when certain files, monitored via a watch in ZooKeeper, are changed,
-such as `security.json`.  However, for changes to files in configsets, like uploading a new `schema.xml`, you
+such as `security.json`.
+However, for changes to files in configsets, like uploading a new `schema.xml`, you
 will need to manually trigger the RELOAD.
 
 [.dynamic-tabs]
@@ -213,21 +239,26 @@ curl -X POST http://localhost:8983/api/collections/techproducts_v2 -H 'Content-T
 === RELOAD Parameters
 
 `name`::
-The name of the collection to reload. This parameter is required by the V1 API.
+The name of the collection to reload.
+This parameter is required by the V1 API.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
 
 === RELOAD Response
 
-The response will include the status of the request and the cores that were reloaded. If the status is anything other than "success", an error message will explain why the request failed.
+The response will include the status of the request and the cores that were reloaded.
+If the status is anything other than "success", an error message will explain why the request failed.
 
 [[modifycollection]]
 == MODIFYCOLLECTION: Modify Attributes of a Collection
 
-It's possible to edit multiple attributes at a time. Changing these values only updates the znode on ZooKeeper, they do not change the topology of the collection. For instance, increasing `replicationFactor` will _not_ automatically add more replicas to the collection but _will_ allow more ADDREPLICA commands to succeed.
+It's possible to edit multiple attributes at a time.
+Changing these values only updates the znode on ZooKeeper; it does not change the topology of the collection.
+For instance, increasing `replicationFactor` will _not_ automatically add more replicas to the collection but _will_ allow more ADDREPLICA commands to succeed.
 
-An attribute can be deleted by passing an empty value. For example, `yet_another_attribute_name=` (with no value) will delete the `yet_another_attribute_name` parameter from the collection.
+An attribute can be deleted by passing an empty value.
+For example, `yet_another_attribute_name=` (with no value) will delete the `yet_another_attribute_name` parameter from the collection.
 
 [.dynamic-tabs]
 --
@@ -268,7 +299,8 @@ curl -X POST http://localhost:8983/api/collections/techproducts_v2 -H 'Content-T
 === MODIFYCOLLECTION Parameters
 
 `collection`::
-The name of the collection to be modified. This parameter is required.
+The name of the collection to be modified.
+This parameter is required.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
@@ -290,7 +322,8 @@ See the <<create,CREATE action>> section above for details on these attributes.
 [[readonlymode]]
 ==== Read-Only Mode
 Setting the `readOnly` attribute to `true` puts the collection in read-only mode,
-in which any index update requests are rejected. Other collection-level actions (e.g., adding /
+in which any index update requests are rejected.
+Other collection-level actions (e.g., adding, removing, or moving
 replicas) are still available in this mode.
 
 The transition from the (default) read-write to read-only mode consists of the following steps:
@@ -361,7 +394,8 @@ using the new name.
 
 This command does NOT actually rename the underlying Solr collection - it sets up a new one-to-one alias
 using the new name, or renames the existing alias so that it uses the new name, while still referring to
-the same underlying Solr collection. However, from the user's point of view the collection can now be
+the same underlying Solr collection.
+However, from the user's point of view the collection can now be
 accessed using the new name, and the new name can be also referred to in other aliases.
 
 The following limitations apply:
@@ -400,9 +434,11 @@ Name of the existing SolrCloud collection or an alias that refers to exactly one
 a Routed Alias.
 
 `target`::
-Target name of the collection. This will be the new alias that refers to the underlying SolrCloud collection.
+Target name of the collection.
+This will be the new alias that refers to the underlying SolrCloud collection.
 The original name (or alias) of the collection will also be replaced in the existing aliases so that they
-also refer to the new name. Target name must not be an existing alias.
+also refer to the new name.
+The target name must not be an existing alias.
 
 === Examples using RENAME
 Assuming there are two actual SolrCloud collections named `collection1` and `collection2`,
@@ -470,14 +506,16 @@ curl -X DELETE http://localhost:8983/api/collections/techproducts_v2?async=aaaa
 === DELETE Parameters
 
 `name`::
-The name of the collection to delete. This parameter is required.
+The name of the collection to delete.
+This parameter is required.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
 
 === DELETE Response
 
-The response will include the status of the request and the cores that were deleted. If the status is anything other than "success", an error message will explain why the request failed.
+The response will include the status of the request and the cores that were deleted.
+If the status is anything other than "success", an error message will explain why the request failed.
 
 *Output*
 
@@ -552,16 +590,20 @@ The name of the collection for which the property would be set.
 The name of the property.
 
 `propertyValue` (v1), `value` (v2)::
-The value of the property. When not provided, the property is deleted.
+The value of the property.
+When not provided, the property is deleted.
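+
+For example, setting and then deleting a custom collection property might look like this (the property name is illustrative):
+
+[source,bash]
+----
+# Set the property
+curl "http://localhost:8983/solr/admin/collections?action=COLLECTIONPROP&name=techproducts&propertyName=foo&propertyValue=bar"
+
+# Delete it again by omitting the value
+curl "http://localhost:8983/solr/admin/collections?action=COLLECTIONPROP&name=techproducts&propertyName=foo"
+----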
 
 === COLLECTIONPROP Response
 
-The response will include the status of the request and the properties that were updated or removed. If the status is anything other than "0", an error message will explain why the request failed.
+The response will include the status of the request and the properties that were updated or removed.
+If the status is anything other than "0", an error message will explain why the request failed.
 
 [[migrate]]
 == MIGRATE: Migrate Documents to Another Collection
 
-The MIGRATE command is used to migrate all documents having a given routing key to another collection. The source collection will continue to have the same data as-is but it will start re-routing write requests to the target collection for the number of seconds specified by the `forward.timeout` parameter. It is the responsibility of the user to switch to the target collection for reads and writes after the MIGRATE action completes.
+The MIGRATE command is used to migrate all documents having a given routing key to another collection.
+The source collection will continue to have the same data as-is but it will start re-routing write requests to the target collection for the number of seconds specified by the `forward.timeout` parameter.
+It is the responsibility of the user to switch to the target collection for reads and writes after the MIGRATE action completes.
 
 [.dynamic-tabs]
 --
@@ -595,30 +637,40 @@ curl -X POST http://localhost:8983/api/collections/techproducts_v2 -H 'Content-T
 --
 
 
-The routing key specified by the `split.key` parameter may span multiple shards on both the source and the target collections. The migration is performed shard-by-shard in a single thread. One or more temporary collections may be created by this command during the ‘migrate’ process but they are cleaned up at the end automatically.
+The routing key specified by the `split.key` parameter may span multiple shards on both the source and the target collections.
+The migration is performed shard-by-shard in a single thread.
+One or more temporary collections may be created by this command during the ‘migrate’ process but they are cleaned up at the end automatically.
 
-This is a long running operation and therefore using the `async` parameter is highly recommended. If the `async` parameter is not specified then the operation is synchronous by default and keeping a large read timeout on the invocation is advised. Even with a large read timeout, the request may still timeout but that doesn’t necessarily mean that the operation has failed. Users should check logs, cluster state, source and target collections before invoking the operation again.
+This is a long running operation and therefore using the `async` parameter is highly recommended.
+If the `async` parameter is not specified then the operation is synchronous by default and keeping a large read timeout on the invocation is advised.
+Even with a large read timeout, the request may still time out, but that doesn’t necessarily mean that the operation has failed.
+Users should check logs, cluster state, source and target collections before invoking the operation again.
 
-This command works only with collections using the compositeId router. The target collection must not receive any writes during the time the MIGRATE command is running otherwise some writes may be lost.
+This command works only with collections using the compositeId router.
+The target collection must not receive any writes during the time the MIGRATE command is running otherwise some writes may be lost.
 
 Please note that the MIGRATE API does not perform any de-duplication on the documents so if the target collection contains documents with the same uniqueKey as the documents being migrated then the target collection will end up with duplicate documents.
 
 === MIGRATE Parameters
 
 `collection`::
-The name of the source collection from which documents will be split. This parameter is required.
+The name of the source collection from which documents will be split.
+This parameter is required.
 
 `target.collection` (v1), `target` (v2)::
-The name of the target collection to which documents will be migrated. This parameter is required.
+The name of the target collection to which documents will be migrated.
+This parameter is required.
 
 `split.key` (v1), `splitKey` (v2)::
-The routing key prefix. For example, if the uniqueKey of a document is "a!123", then you would use `split.key=a!`. This parameter is required.
+The routing key prefix.
+For example, if the uniqueKey of a document is "a!123", then you would use `split.key=a!`.
+This parameter is required.
 
 `forward.timeout` (v1), `forwardTimeout` (v2)::
-The timeout, in seconds, until which write requests made to the source collection for the given `split.key` will be forwarded to the target shard. The default is 60 seconds.
+The timeout, in seconds, until which write requests made to the source collection for the given `split.key` will be forwarded to the target shard.
+The default is 60 seconds.
 
 `property._name_=_value_`::
-Set core property _name_ to _value_. See the section <<defining-core-properties.adoc#,Defining core.properties>> for details on supported properties and values.
+Set core property _name_ to _value_.
+See the section <<core-discovery.adoc#,Core Discovery>> for details on supported properties and values.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
@@ -661,73 +713,72 @@ with caution, evaluating the potential impact by using different source and targ
 collection names first, and preserving the source collection until the evaluation is
 complete.
 
-The target collection must not exist (and may not be an alias). If the target
-collection name is the same as the source collection then first a unique sequential name
-will be generated for the target collection, and then after reindexing is done an alias
-will be created that points from the source name to the actual sequentially-named target collection.
+The target collection must not exist (and may not be an alias).
+If the target collection name is the same as the source collection then first a unique sequential name will be generated for the target collection, and then after reindexing is done an alias will be created that points from the source name to the actual sequentially-named target collection.
 
-When reindexing is started the source collection is put in <<readonlymode,read-only mode>> to ensure that
-all source documents are properly processed.
+When reindexing is started the source collection is put in <<readonlymode,read-only mode>> to ensure that all source documents are properly processed.
 
-Using optional parameters a different index schema, collection shape (number of shards and replicas)
-or routing parameters can be requested for the target collection.
+Using optional parameters a different index schema, collection shape (number of shards and replicas) or routing parameters can be requested for the target collection.
 
-Reindexing is executed as a streaming expression daemon, which runs on one of the
-source collection's replicas. It is usually a time-consuming operation so it's recommended to execute
-it as an asynchronous request in order to avoid request time outs. Only one reindexing operation may
-execute concurrently for a given source collection. Long-running, erroneous or crashed reindexing
-operations may be terminated by using the `abort` option, which also removes partial results.
+Reindexing is executed as a streaming expression daemon, which runs on one of the source collection's replicas.
+It is usually a time-consuming operation so it's recommended to execute it as an asynchronous request in order to avoid request time outs.
+Only one reindexing operation may execute concurrently for a given source collection.
+Long-running, erroneous or crashed reindexing operations may be terminated by using the `abort` option, which also removes partial results.
 
 === REINDEXCOLLECTION Parameters
 
 `name`::
-Source collection name, may be an alias. This parameter is required.
+Source collection name, may be an alias.
+This parameter is required.
 
 `cmd`::
-Optional command. Default command is `start`. Currently supported commands are:
+Optional command.
+Default command is `start`.
+Currently supported commands are:
 * `start` - default, starts processing if not already running,
 * `abort` - aborts an already running reindexing (or clears a left-over status after a crash),
 and deletes partial results,
 * `status` - returns detailed status of a running reindexing command.
 
 `target`::
-Target collection name, optional. If not specified a unique name will be generated and
-after all documents have been copied an alias will be created that points from the source
-collection name to the unique sequentially-named collection, effectively "hiding"
+Target collection name, optional.
+If not specified a unique name will be generated and after all documents have been copied an alias will be created that points from the source collection name to the unique sequentially-named collection, effectively "hiding"
 the original source collection from regular update and search operations.
 
 `q`::
-Optional query to select documents for reindexing. Default value is `\*:*`.
+Optional query to select documents for reindexing.
+Default value is `\*:*`.
 
 `fl`::
-Optional list of fields to reindex. Default value is `*`.
+Optional list of fields to reindex.
+Default value is `*`.
 
 `rows`::
-Documents are transferred in batches. Depending on the average size of the document large
-batch sizes may cause memory issues. Default value is 100.
+Documents are transferred in batches.
+Depending on the average size of the documents, large batch sizes may cause memory issues.
+Default value is 100.
 
 `configName`::
 `collection.configName`::
-Optional name of the configset for the target collection. Default is the same as the
-source collection.
+Optional name of the configset for the target collection.
+Default is the same as the source collection.
 
-There's a number of optional parameters that determine the target collection layout. If they
-are not specified in the request then their values are copied from the source collection.
+There are a number of optional parameters that determine the target collection layout.
+If they are not specified in the request then their values are copied from the source collection.
 The following parameters are currently supported (described in detail in the <<create,CREATE collection>> section):
 `numShards`, `replicationFactor`, `nrtReplicas`, `tlogReplicas`, `pullReplicas`,
 `shards`, `policy`, `createNodeSet`, `createNodeSet.shuffle`, `router.*`.
 
 `removeSource`::
-Optional boolean. If true then after the processing is successfully finished the source collection will
-be deleted.
+Optional boolean.
+If `true` then the source collection will be deleted after the processing has successfully finished.
 
 `async`::
 Optional request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
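+
+As a sketch of how the `cmd` parameter might be used, a running reindexing job could be checked or aborted with requests like these (the collection name is illustrative):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=REINDEXCOLLECTION&name=techproducts_v2&cmd=status"
+curl "http://localhost:8983/solr/admin/collections?action=REINDEXCOLLECTION&name=techproducts_v2&cmd=abort"
+----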
 
 When the reindexing process has completed, the target collection is marked using
 `property.rx: "finished"`, and the source collection state is updated to become read-write.
-On any errors the command will delete any temporary and target collections and also reset the
-state of the source collection's read-only flag.
+On any errors the command will delete any temporary and target collections and also reset the state of the source collection's read-only flag.
 
 === Examples using REINDEXCOLLECTION
 
@@ -737,11 +788,9 @@ state of the source collection's read-only flag.
 ----
 http://localhost:8983/solr/admin/collections?action=REINDEXCOLLECTION&name=techproducts_v2&numShards=3&configName=conf2&q=id:aa*&fl=id,string_s
 ----
-This request specifies a different schema for the target collection, copies only some of the fields, selects only the documents
-matching a query, and also potentially re-shapes the collection by explicitly specifying 3 shards. Since the target collection
-hasn't been specified in the parameters, a collection with a unique name, e.g., `.rx_techproducts_v2_2`, will be created and on success
-an alias pointing from `techproducts_v2` to `.rx_techproducts_v2_2` will be created, effectively replacing the source collection
-for the purpose of indexing and searching. The source collection is assumed to be small so a synchronous request was made.
+This request specifies a different schema for the target collection, copies only some of the fields, selects only the documents matching a query, and also potentially re-shapes the collection by explicitly specifying 3 shards.
+Since the target collection hasn't been specified in the parameters, a collection with a unique name, e.g., `.rx_techproducts_v2_2`, will be created and on success an alias pointing from `techproducts_v2` to `.rx_techproducts_v2_2` will be created, effectively replacing the source collection for the purpose of indexing and searching.
+The source collection is assumed to be small so a synchronous request was made.
 
 *Output*
 
@@ -762,15 +811,14 @@ for the purpose of indexing and searching. The source collection is assumed to b
   }
 }
 ----
-As a result a new collection `.rx_techproducts_v2_2` has been created, with selected documents reindexed to 3 shards, and
-with an alias pointing from `techproducts_v2` to this one. The status also shows that the source collection
-was already an alias to `.rx_techproducts_v2_1`, which was likely a result of a previous reindexing.
+As a result a new collection `.rx_techproducts_v2_2` has been created, with selected documents reindexed to 3 shards, and with an alias pointing from `techproducts_v2` to this one.
+The status also shows that the source collection was already an alias to `.rx_techproducts_v2_1`, which was likely a result of a previous reindexing.
 
 [[colstatus]]
 == COLSTATUS: Detailed Status of a Collection's Indexes
 
-The COLSTATUS command provides a detailed description of the collection status, including low-level index
-information about segments and field data.  There isn't a good equivalent V2 API that supports all the parameters below.
+The COLSTATUS command provides a detailed description of the collection status, including low-level index information about segments and field data.
+There isn't a good equivalent V2 API that supports all the parameters below.
 
 [.dynamic-tabs]
 --
@@ -797,13 +845,8 @@ curl -X GET http://localhost:8983/api/collections/techproducts_v2
 ====
 --
 
-This command also checks the compliance of Lucene index field types with the current Solr collection
-schema and indicates the names of non-compliant fields, i.e., Lucene fields with field types incompatible
-(or different) from the corresponding Solr field types declared in the current schema. Such incompatibilities may
-result from incompatible schema changes or after migration of
-data to a different major Solr release.
-
-
+This command also checks the compliance of Lucene index field types with the current Solr collection schema and indicates the names of non-compliant fields, i.e., Lucene fields with field types incompatible (or different) from the corresponding Solr field types declared in the current schema.
+Such incompatibilities may result from incompatible schema changes or after migration of data to a different major Solr release.
 
 === COLSTATUS Parameters
 
@@ -811,62 +854,64 @@ data to a different major Solr release.
 Collection name (optional). If missing then it means all collections.
 
 `coreInfo`::
-Optional boolean. If true then additional information will be provided about
+Optional boolean.
+If true then additional information will be provided about
 SolrCore of shard leaders.
 
 `segments`::
-Optional boolean. If true then segment information will be provided.
+Optional boolean.
+If true then segment information will be provided.
 
 `fieldInfo`::
-Optional boolean. If true then detailed Lucene field information will be provided
+Optional boolean.
+If true then detailed Lucene field information will be provided
 and their corresponding Solr schema types.
 
 `sizeInfo`::
-Optional boolean. If true then additional information about the index files
+Optional boolean.
+If true then additional information about the index files
 size and their RAM usage will be provided.
 
 ==== Index Size Analysis Tool
-The `COLSTATUS` command also provides a tool for analyzing and estimating the composition of raw index data. Please note that
-this tool should be used with care because it generates a significant IO load on all shard leaders of the
-analyzed collections. A sampling threshold and a sampling percent parameters can be adjusted to reduce this
-load to some degree.
+The `COLSTATUS` command also provides a tool for analyzing and estimating the composition of raw index data.
+Please note that this tool should be used with care because it generates a significant IO load on all shard leaders of the analyzed collections.
+The sampling threshold and sampling percent parameters can be adjusted to reduce this load to some degree.
 
-Size estimates produced by this tool are only approximate and represent the aggregated size of uncompressed
-index data. In reality these values would never occur, because Lucene (and Solr) always stores data in a
-compressed format - still, these values help to understand what occupies most of the space and the relative size
-of each type of data and each field in the index.
+Size estimates produced by this tool are only approximate and represent the aggregated size of uncompressed index data.
+In reality these values would never occur, because Lucene (and Solr) always stores data in a compressed format; still, these values help you understand what occupies most of the space and the relative size of each type of data and each field in the index.
 
-In the following sections whenever "size" is mentioned it means an estimated aggregated size of
-uncompressed (raw) data.
+In the following sections whenever "size" is mentioned it means an estimated aggregated size of uncompressed (raw) data.
 
 The following parameters are specific to this tool:
 
 `rawSize`::
-Optional boolean. If true then run the raw index data analysis tool (other boolean options below imply
-this option if any of them are true). Command response will include sections that show estimated breakdown of
-data size per field and per data type.
+Optional boolean.
+If `true` then run the raw index data analysis tool (other boolean options below imply this option if any of them are true).
+The command response will include sections that show an estimated breakdown of data size per field and per data type.
 
 `rawSizeSummary`::
-Optional boolean. If true then include also a more detailed breakdown of data size per field and per type.
+Optional boolean.
+If `true` then also include a more detailed breakdown of data size per field and per type.
 
 `rawSizeDetails`::
-Optional boolean. If true then provide exhaustive details that include statistical distribution of items per
-field and per type as well as top 20 largest items per field.
+Optional boolean.
+If `true` then provide exhaustive details that include statistical distribution of items per field and per type as well as top 20 largest items per field.
 
 `rawSizeSamplingPercent`::
-Optional float. When the index is larger than a certain threshold (100k documents per shard) only a part of
-data is actually retrieved and analyzed in order to reduce the IO load, and then the final results are extrapolated.
-Values must be greater than 0 and less or equal to 100.0. Default value is 5.0. Very small values (between 0.0 and 1.0)
-may introduce significant estimation errors. Also, values that would result in less than 10 documents being sampled
-are rejected with an exception.
+Optional float.
+When the index is larger than a certain threshold (100k documents per shard), only a part of the data is actually retrieved and analyzed in order to reduce the IO load, and then the final results are extrapolated.
+Values must be greater than 0 and less than or equal to 100.0.
+Default value is `5.0`.
+Very small values (between 0.0 and 1.0) may introduce significant estimation errors.
+Also, values that would result in less than 10 documents being sampled are rejected with an exception.
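+
+For illustration, a raw index data analysis request using the V1 API might look like the following (the collection name is just an example):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=COLSTATUS&collection=techproducts_v2&rawSize=true&rawSizeSummary=true&rawSizeSamplingPercent=5.0"
+----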
 
 Response for this command always contains two sections:
 
-* `fieldsBySize` is a map where field names are keys and values are estimated sizes of raw (uncompressed) data
-that belongs to the field. The map is sorted by size so that it's easy to see what field occupies most space.
+* `fieldsBySize` is a map where field names are keys and values are estimated sizes of raw (uncompressed) data that belongs to the field.
+The map is sorted by size so that it's easy to see which field occupies the most space.
 
-* `typesBySize` is a map where data types are the keys and values are estimates sizes of raw (uncompressed) data
-of particular type. This map is also sorted by size.
+* `typesBySize` is a map where data types are the keys and values are estimated sizes of raw (uncompressed) data of a particular type.
+This map is also sorted by size.
 
 Optional sections include:
 
@@ -877,19 +922,19 @@ This section also shows `topN` values by size from each field.
 
 Data types shown in the response can be roughly divided into the following groups:
 
-* `storedFields` - represents the raw uncompressed data in stored fields. For example, for UTF-8 strings this represents
-the aggregated sum of the number of bytes in the strings' UTF-8 representation, for long numbers this is 8 bytes per value, etc.
+* `storedFields` - represents the raw uncompressed data in stored fields.
+For example, for UTF-8 strings this represents the aggregated sum of the number of bytes in the strings' UTF-8 representation, for long numbers this is 8 bytes per value, etc.
 
-* `terms_terms` - represents the aggregated size of the term dictionary. The size of this data is affected by the
-the number and length of unique terms, which in turn depends on the field size and the analysis chain.
+* `terms_terms` - represents the aggregated size of the term dictionary.
+The size of this data is affected by the number and length of unique terms, which in turn depends on the field size and the analysis chain.
 
 * `terms_postings` - represents the aggregated size of all term position and offset information, if present.
 This information may be absent if position-based searching, such as phrase queries, is not needed.
 
 * `terms_payloads` - represents the aggregated size of all per-term payload data, if present.
 
-* `norms` - represents the aggregated size of field norm information. This information may be omitted if a field
-has an `omitNorms` flag in the schema, which is common for fields that don't need weighting or scoring by field length.
+* `norms` - represents the aggregated size of field norm information.
+This information may be omitted if a field has an `omitNorms` flag in the schema, which is common for fields that don't need weighting or scoring by field length.
 
 * `termVectors` - represents the aggregated size of term vectors.
 
@@ -1250,7 +1295,9 @@ curl -X POST http://localhost:8983/api/collections -H 'Content-Type: application
 ====
 --
 
-The BACKUP command will backup Solr indexes and configurations for a specified collection. The BACKUP command <<making-and-restoring-backups.adoc#,takes one copy from each shard for the indexes>>. For configurations, it backs up the configset that was associated with the collection and metadata.
+The BACKUP command will back up Solr indexes and configurations for a specified collection.
+The BACKUP command <<backup-restore.adoc#,takes one copy from each shard for the indexes>>.
+For configurations, it backs up the configset associated with the collection, along with related metadata.
 
 Backup data is stored in the repository based on the provided `name` and `location`.
 Each backup location can hold multiple backups for the same collection, allowing users to later restore from any of these "backup points" as desired.
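+
+For example, a V1 API backup request might look like the following (the collection name and location are placeholders):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=BACKUP&name=myBackup&collection=techproducts&location=/path/to/shared/drive"
+----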
@@ -1266,24 +1313,30 @@ See the `incremental` parameter below for more information.
 === BACKUP Parameters
 
 `collection`::
-The name of the collection to be backed up. This parameter is required.
+The name of the collection to be backed up.
+This parameter is required.
 
 `name`::
-What to name the backup that is created.  This is checked to make sure it doesn't already exist, and otherwise an error message is raised. This parameter is required.
+What to name the backup that is created.
+This is checked to make sure it doesn't already exist; if it does, an error message is raised.
+This parameter is required.
 
 `location`::
-The location on a shared drive for the backup command to write to. This parameter is required, unless a default location is defined on the repository configuration, or set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
+The location on a shared drive for the backup command to write to.
+This parameter is required, unless a default location is defined on the repository configuration, or set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
 +
 If the location path is on a mounted drive, the mount must be available on the node that serves as the overseer, even if the overseer node does not host a replica of the collection being backed up.
 Since any node can take the overseer role at any time, a best practice to avoid possible backup failures is to ensure the mount point is available on all nodes of the cluster.
 +
-Each backup location can only hold a backup for one collection, however the same location can be used for repeated backups of the same collection.  Repeated backups of the same collection are done incrementally, so that files unchanged since the last backup are not duplicated in the backup repository.
+Each backup location can only hold a backup for one collection; however, the same location can be used for repeated backups of the same collection.
+Repeated backups of the same collection are done incrementally, so that files unchanged since the last backup are not duplicated in the backup repository.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
 
 `repository`::
-The name of a repository to be used for the backup. If no repository is specified then the local filesystem repository will be used automatically.
+The name of a repository to be used for the backup.
+If no repository is specified then the local filesystem repository will be used automatically.
 
 `maxNumBackupPoints`::
 The upper-bound on how many backups should be retained at the backup location.
@@ -1320,7 +1373,8 @@ The backup name usually corresponds to the collection-name, but isn't required t
 This parameter is required.
 
 `location`::
-The repository location to list backups from. This parameter is required, unless a default location is defined on the repository configuration, or set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
+The repository location to list backups from.
+This parameter is required, unless a default location is defined on the repository configuration, or set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
 +
 If the location path is on a mounted drive, the mount must be available on the node that serves as the overseer, even if the overseer node does not host a replica of the collection being backed up.
 Since any node can take the overseer role at any time, a best practice to avoid possible backup failures is to ensure the mount point is available on all nodes of the cluster.
@@ -1442,33 +1496,40 @@ The RESTORE operation will replace the content of a collection with files from t
 
 If the provided `collection` value matches an existing collection, Solr will use it for restoration, assuming it is compatible (same number of shards, etc.) with the stored backup files.
 If the provided `collection` value doesn't exist, a new collection with that name is created in a way compatible with the stored backup files.
-The collection created will be have the same number of shards and replicas as the original collection, preserving routing information, etc. Optionally, you can override some parameters documented below.
+The collection created will have the same number of shards and replicas as the original collection, preserving routing information, etc.
+Optionally, you can override some parameters documented below.
 
 While restoring, if a configset with the same name exists in ZooKeeper then Solr will reuse that, or else it will upload the backed up configset in ZooKeeper and use that.
 
-You can use the collection <<collection-aliasing.adoc#createalias,CREATEALIAS>> command to make sure clients don't need to change the endpoint to query or index against the newly restored collection.
+You can use the collection <<alias-management.adoc#createalias,CREATEALIAS>> command to make sure clients don't need to change the endpoint to query or index against the newly restored collection.
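+
+As a simple illustration, a V1 API restore request might look like the following (the backup name, collection, and location are placeholders):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=RESTORE&name=myBackup&collection=techproducts&location=/path/to/shared/drive"
+----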
 
 === RESTORE Parameters
 
 `collection`::
-The collection where the indexes will be restored into. This parameter is required.
+The collection where the indexes will be restored into.
+This parameter is required.
 
 `name`::
-The name of the existing backup that you want to restore. This parameter is required.
+The name of the existing backup that you want to restore.
+This parameter is required.
 
 `location`::
-The location on a shared drive for the RESTORE command to read from. Alternately it can be set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
+The location on a shared drive for the RESTORE command to read from.
+Alternatively, it can be set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
 
 `async`::
 Request ID to track this action which will be <<collections-api.adoc#asynchronous-calls,processed asynchronously>>.
 
 `repository`::
-The name of a repository to be used for the backup. If no repository is specified then the local filesystem repository will be used automatically.
+The name of a repository to be used for the backup.
+If no repository is specified then the local filesystem repository will be used automatically.
 
 `backupId`::
 The ID of a specific backup point to restore from.
 +
-Backup locations can hold multiple backups of the same collection.  This parameter allows users to choose which of those backups should be used to restore from.  If not specified the most recent backup point is used.
+Backup locations can hold multiple backups of the same collection.
+This parameter allows users to choose which of those backups should be used to restore from.
+If not specified the most recent backup point is used.
 
 There are also optional parameters that determine the target collection layout.
 The following parameters are currently supported (described in detail in the <<create,CREATE collection>> section):
@@ -1556,16 +1617,19 @@ curl -X POST http://localhost:8983/v2/collections/backups -H 'Content-Type: appl
 === DELETEBACKUP Parameters
 
 `name`::
-The backup name to delete backup files from.  This parameter is required.
+The backup name to delete backup files from.
+This parameter is required.
 
 `location`::
-The repository location to delete backups from. This parameter is required, unless a default location is defined on the repository configuration, or set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
+The repository location to delete backups from.
+This parameter is required, unless a default location is defined on the repository configuration, or set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
 +
 If the location path is on a mounted drive, the mount must be available on the node that serves as the overseer, even if the overseer node does not host a replica of the collection being backed up.
 Since any node can take the overseer role at any time, a best practice to avoid possible backup failures is to ensure the mount point is available on all nodes of the cluster.
 
 `repository`::
-The name of a repository to be used for deleting backup files. If no repository is specified then the local filesystem repository will be used automatically.
+The name of a repository to be used for deleting backup files.
+If no repository is specified then the local filesystem repository will be used automatically.
 
 `backupId`::
 Explicitly specify a single backup-ID to delete.
@@ -1621,28 +1685,35 @@ curl -X POST http://localhost:8983/api/collections/techproducts -H 'Content-Type
 ====
 --
 
-Leaders are assigned in a collection according to the `preferredLeader` property on active nodes. This command should be run after the preferredLeader property has been assigned via the BALANCESHARDUNIQUE or ADDREPLICAPROP commands.
+Leaders are assigned in a collection according to the `preferredLeader` property on active nodes.
+This command should be run after the preferredLeader property has been assigned via the BALANCESHARDUNIQUE or ADDREPLICAPROP commands.
 
-NOTE: It is not _required_ that all shards in a collection have a `preferredLeader` property. Rebalancing will only attempt to reassign leadership to those replicas that have the `preferredLeader` property set to `true` _and_ are not currently the shard leader _and_ are currently active.
+NOTE: It is not _required_ that all shards in a collection have a `preferredLeader` property.
+Rebalancing will only attempt to reassign leadership to those replicas that have the `preferredLeader` property set to `true` _and_ are not currently the shard leader _and_ are currently active.
 
 === REBALANCELEADERS Parameters
 
 `collection`::
-The name of the collection to rebalance `preferredLeaders` on. This parameter is required.
+The name of the collection to rebalance `preferredLeaders` on.
+This parameter is required.
 
 `maxAtOnce`::
-The maximum number of reassignments to have queue up at once. Values \<=0 are use the default value Integer.MAX_VALUE.
+The maximum number of reassignments to have queued up at once.
+Values \<=0 use the default value `Integer.MAX_VALUE`.
 +
 When this number is reached, the process waits for one or more leaders to be successfully assigned before adding more to the queue.
 
 `maxWaitSeconds`::
-Defaults to `60`. This is the timeout value when waiting for leaders to be reassigned. If `maxAtOnce` is less than the number of reassignments that will take place, this is the maximum interval that any _single_ wait for at least one reassignment.
+Defaults to `60`.
+This is the timeout value when waiting for leaders to be reassigned.
+If `maxAtOnce` is less than the number of reassignments that will take place, this is the maximum interval for any _single_ wait for at least one reassignment to occur.
 +
 For example, if 10 reassignments are to take place and `maxAtOnce` is `1` and `maxWaitSeconds` is `60`, the upper bound on the time that the command may wait is 10 minutes.
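+
+For illustration, a request that throttles reassignments with these parameters might look like the following (the collection name and values are only examples):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/collections?action=REBALANCELEADERS&collection=techproducts&maxAtOnce=5&maxWaitSeconds=30"
+----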
 
 === REBALANCELEADERS Response
 
-The response will include the status of the request. A status of "0" indicates the request was _processed_, not that all assignments were successful. Examine the "Summary" section for that information.
+The response will include the status of the request.
+A status of "0" indicates the request was _processed_, not that all assignments were successful.
+Examine the "Summary" section for that information.
 
 === Examples using REBALANCELEADERS
 
@@ -1662,10 +1733,12 @@ http://localhost:8983/solr/admin/collections?action=REBALANCELEADERS&collection=
 In this example:
 
 * In the "alreadyLeaders" section, core_node5 was already the leader, so there were no changes in leadership for shard1.
-* In the "inactivePreferreds" section, core_node57 had the preferredLeader property set, but the node was not active, the leader for shard7 was not changed. This is considered successful.
+* In the "inactivePreferreds" section, core_node57 had the preferredLeader property set, but the node was not active, so the leader for shard7 was not changed.
+This is considered successful.
 * In the "successes" section, core_node23 was _not_ the leader for shard3, so leadership was assigned to that replica.
 
-The "Summary" section with the "Success" tag indicates that the command rebalanced all _active_ replicas with the preferredLeader property set as requried. If a replica cannot be made leader due to not being healthy (for example, it is on a Solr instance that is not running), it's also considered success.
+The "Summary" section with the "Success" tag indicates that the command rebalanced all _active_ replicas with the preferredLeader property set as required.
+If a replica cannot be made leader due to not being healthy (for example, it is on a Solr instance that is not running), it's also considered success.
 
 [source,json]
 ----
@@ -1691,6 +1764,8 @@ The "Summary" section with the "Success" tag indicates that the command rebalanc
 
 Examining the clusterstate after issuing this call should show that every active replica that has the `preferredLeader` property should also have the "leader" property set to _true_.
 
-NOTE: The added work done by an NRT leader is quite small and only present when indexing. The primary use-case is to redistribute the leader role if there are a large number of leaders concentrated on a small number of nodes. Rebalancing will likely not improve performance unless the imbalance of leadership roles is measured in multiples of 10.
+NOTE: The added work done by an NRT leader is quite small and only present when indexing.
+The primary use-case is to redistribute the leader role if there are a large number of leaders concentrated on a small number of nodes.
+Rebalancing will likely not improve performance unless the imbalance of leadership roles is measured in multiples of 10.
 
 NOTE: The BALANCESHARDUNIQUE command that distributes the preferredLeader property does not guarantee perfect distribution and in some collection topologies it is impossible to make that guarantee.
diff --git a/solr/solr-ref-guide/src/collection-specific-tools.adoc b/solr/solr-ref-guide/src/collection-specific-tools.adoc
deleted file mode 100644
index 3e09c92..0000000
--- a/solr/solr-ref-guide/src/collection-specific-tools.adoc
+++ /dev/null
@@ -1,43 +0,0 @@
-= Collection-Specific Tools
-:page-children: analysis-screen, documents-screen, files-screen, query-screen, stream-screen, schema-browser-screen
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-In the left-hand navigation bar, you will see a pull-down menu titled "Collection Selector" that can be used to access collection specific administration screens.
-
-.Only Visible When Using SolrCloud
-[NOTE]
-====
-The "Collection Selector" pull-down menu is only available on Solr instances running in <<solrcloud.adoc#,SolrCloud mode>>.
-
-Single node or leader/follower replication instances of Solr will not display this menu, instead the Collection specific UI pages described in this section will be available in the <<core-specific-tools.adoc#,Core Selector pull-down menu>>.
-====
-
-Clicking on the Collection Selector pull-down menu will show a list of the collections in your Solr cluster, with a search box that can be used to find a specific collection by name. When you select a collection from the pull-down, the main display of the page will display some basic metadata about the collection, and a secondary menu will appear in the left nav with links to additional collection specific administration screens.
-
-image::images/collection-specific-tools/collection_dashboard.png[image,width=482,height=250]
-
-The collection-specific UI screens are listed below, with a link to the section of this guide to find out more:
-
-// TODO: SOLR-10655 BEGIN: refactor this into a 'collection-screens-list.include.adoc' file for reuse
-* <<analysis-screen.adoc#,Analysis>> - lets you analyze the data found in specific fields.
-* <<documents-screen.adoc#,Documents>> - provides a simple form allowing you to execute various Solr indexing commands directly from the browser.
-* <<files-screen.adoc#,Files>> - shows the current core configuration files such as `solrconfig.xml`.
-* <<query-screen.adoc#,Query>> - lets you submit a structured query about various elements of a core.
-* <<stream-screen.adoc#,Stream>> - allows you to submit streaming expressions and see results and parsing explanations.
-* <<schema-browser-screen.adoc#,Schema Browser>> - displays schema data in a browser window.
-// TODO: SOLR-10655 END
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index ab5e949..03c778c 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -1,5 +1,4 @@
 = Collections API
-:page-children: cluster-node-management, collection-management, collection-aliasing, shard-management, replica-management
 :page-show-toc: false
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
@@ -26,7 +25,7 @@ Because this API has a large number of commands and options, we've grouped the c
 
 *<<collection-management.adoc#,Collection Management>>*: Create, list, reload and delete collections; set collection properties; migrate documents to another collection; rebalance leaders; backup and restore collections.
 
-*<<collection-aliasing.adoc#,Collection Aliasing>>*: Create, list or delete collection aliases; set alias properties.
+*<<alias-management.adoc#,Alias Management>>*: Create, list or delete collection aliases; set alias properties.
 
 *<<shard-management.adoc#,Shard Management>>*: Create and delete a shard; split a shard into two or more additional shards; force a shard leader.
 
diff --git a/solr/solr-ref-guide/src/collections-core-admin.adoc b/solr/solr-ref-guide/src/collections-core-admin.adoc
index ffc228d..292da8a 100644
--- a/solr/solr-ref-guide/src/collections-core-admin.adoc
+++ b/solr/solr-ref-guide/src/collections-core-admin.adoc
@@ -20,12 +20,13 @@ The Collections screen provides some basic functionality for managing your Colle
 
 [NOTE]
 ====
-If you are running a single node Solr instance, you will not see a Collections option in the left nav menu of the Admin UI.
+If you are running a user-managed cluster or a single-node installation, you will not see a Collections option in the left nav menu of the Admin UI.
 
 You will instead see a "Core Admin" screen that supports some comparable Core level information & manipulation via the <<coreadmin-api.adoc#,CoreAdmin API>> instead.
 ====
 
-The main display of this page provides a list of collections that exist in your cluster. Clicking on a collection name provides some basic metadata about how the collection is defined, and its current shards & replicas, with options for adding and deleting individual replicas.
+The main display of this page provides a list of collections that exist in your cluster.
+Clicking on a collection name provides some basic metadata about how the collection is defined, and its current shards & replicas, with options for adding and deleting individual replicas.
 
 The buttons at the top of the screen let you make various collection-level changes to your cluster, from adding new collections or aliases to reloading or deleting a single collection.
 
diff --git a/solr/solr-ref-guide/src/combining-distribution-and-replication.adoc b/solr/solr-ref-guide/src/combining-distribution-and-replication.adoc
deleted file mode 100644
index a45e59f..0000000
--- a/solr/solr-ref-guide/src/combining-distribution-and-replication.adoc
+++ /dev/null
@@ -1,33 +0,0 @@
-= Combining Distribution and Replication
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-When your index is too large for a single machine and you have a query volume that single shards cannot keep up with, it's time to replicate each shard in your distributed search setup.
-
-The idea is to combine distributed search with replication. As shown in the figure below, a combined distributed-replication configuration features a leader server for each shard and then 1-_n_ followers that are replicated from the leader. As in a standard replicated configuration, the leader server handles updates and optimizations without adversely affecting query handling performance.
-
-Query requests should be load balanced across each of the shard followers. This gives you both increased query handling capacity and fail-over backup if a server goes down.
-
-.A Solr configuration combining both replication and leader-follower distribution.
-image::images/combining-distribution-and-replication/distributed-replication.png[image,width=312,height=344]
-
-
-None of the leader shards in this configuration know about each other. You index to each leader, the index is replicated to each follower, and then searches are distributed across the followers, using one follower from each leader/follower shard.
-
-For high availability you can use a load balancer to set up a virtual IP for each shard's set of followers. If you are new to load balancing, HAProxy (http://haproxy.1wt.eu/) is a good open source software load-balancer. If a follower server goes down, a good load-balancer will detect the failure using some technique (generally a heartbeat system), and forward all requests to the remaining live followers that served with the failed follower. A single virtual IP should then be set up so t [...]
-
-With this configuration you will have a fully load balanced, search-side fault-tolerant system (Solr does not yet support fault-tolerant indexing). Incoming searches will be handed off to one of the functioning followers, then the follower will distribute the search request across a follower for each of the shards in your configuration. The follower will issue a request to each of the virtual IPs for each shard, and the load balancer will choose one of the available followers. Finally, t [...]
diff --git a/solr/solr-ref-guide/src/commits-transaction-logs.adoc b/solr/solr-ref-guide/src/commits-transaction-logs.adoc
new file mode 100644
index 0000000..cf5903c
--- /dev/null
+++ b/solr/solr-ref-guide/src/commits-transaction-logs.adoc
@@ -0,0 +1,321 @@
+= Commits and Transaction Logs
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+In Solr, documents are not available for searching until a "commit" updates the Lucene index files.
+Your commit strategy will determine when document additions, deletes, or changes are available for searching.
+A transaction log records document updates that have been received since the last "hard" commit point.
+
+== <updateHandler> in solrconfig.xml
+
+The settings in this section are configured in the `<updateHandler>` element in `solrconfig.xml` and may affect the performance of index updates.
+These settings affect how updates are done internally.
+
+The `<updateHandler>` element takes a class parameter, which must be `solr.DirectUpdateHandler2`.
+The `_default` configset included with Solr has this section defined already, but the values for many parameters discussed below likely need to be customized for your application.
+
+[source,xml]
+----
+<config>
+  <updateHandler class="solr.DirectUpdateHandler2">
+    ...
+  </updateHandler>
+</config>
+----
+
+Note that `<updateHandler>` configurations do not affect the higher level configuration of <<requesthandlers-searchcomponents.adoc#,request handlers>> that process client update requests.
+
+== Commits
+
+Data sent to Solr is not searchable until it has been _committed_ to the index.
+The reason for this is that in some cases commits can be slow and they should be done in isolation from other possible commit requests to avoid overwriting data.
+
+=== Hard Commits vs. Soft Commits
+
+Solr supports two types of commits: hard commits and soft commits.
+
+A *hard commit* calls `fsync` on the index files to ensure they have been flushed to stable storage.
+The current transaction log is closed and a new one is opened.
+See the section <<Transaction Log>> below for how data is recovered in the absence of a hard commit.
+Optionally a hard commit can also make documents visible for search, but this may not be ideal in some use cases as it is more expensive than a soft commit.
+By default commit actions result in a hard commit of all the Lucene index files to stable storage (disk).
+
+A *soft commit* is faster since it only makes index changes visible and does not `fsync` index files, start a new segment, nor start a new transaction log.
+Search collections that have NRT requirements will want to soft commit often enough to satisfy the visibility requirements of the application.
+A softCommit may be "less expensive" than a hard commit (`openSearcher=true`), but it is not free.
+It is recommended that the soft commit interval be set for as long as is reasonable given the application requirements.
+
+A hard commit means that, if a server crashes, Solr will know exactly where your data was stored; a soft commit means that the data is stored, but the location information isn't yet stored.
+The tradeoff is that a soft commit gives you faster visibility because it's not waiting for background merges to finish.
+
+=== Explicit Commits
+
+When a client includes a `commit=true` parameter with an update request, this ensures that all index segments affected by the adds and deletes on an update are written to disk as soon as index updates are completed.
+
+If an additional parameter `softCommit=true` is specified, then Solr performs a soft commit.
+This is an implementation of Near Real Time storage, a feature that boosts document visibility, since you don't have to wait for background merges and storage (to ZooKeeper, if using SolrCloud) to finish before moving on to something else.
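+
+For example, assuming the `techproducts` example collection, an update request that performs an explicit hard commit might look like this:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/update?commit=true" \
+  -H 'Content-Type: application/json' \
+  -d '[{"id": "commit-example-1", "name": "Example document"}]'
+----
+
+Replacing `commit=true` with `softCommit=true` in the same request would make the document visible for searching without forcing an fsync of the index files.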
+
+Details about using explicit commit requests during indexing are in the section <<indexing-with-update-handlers.adoc#,Indexing with Update Handlers>>.
+
+For more information about Near Real Time operations, see <<solrcloud-distributed-requests.adoc#near-real-time-nrt-use-cases,Near Real Time Use Cases>>.
+
+=== Automatic Commits
+
+To avoid sending explicit commit commands during indexing and to provide control over when commits happen, it's possible to configure `autoCommit` parameters in `solrconfig.xml`.
+
+This is preferable to sending explicit commits from the indexing client as it offers much more control over your commit strategy.
+Note that defaults are provided in `solrconfig.xml`, but they are very likely not tuned to your needs and may introduce performance problems if not tuned effectively.
+
+These settings control how often pending updates will be automatically pushed to the index.
+
+`maxDocs`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The number of updates that have occurred since the last commit.
+
+`maxTime`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The number of milliseconds since the oldest uncommitted update.
+When sending a large batch of documents, this parameter is preferred over `maxDocs`.
+
+`maxSize`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: none
+|===
++
+The maximum size of the transaction log (tlog) on disk, after which a hard commit is triggered.
+This is useful when the size of documents is unknown and the intention is to keep the transaction log at a reasonable size.
++
+Valid values can be bytes (default with no suffix), kilobytes (if defined with a `k` suffix, as in `25k`), megabytes (`m`) or gigabytes (`g`).
+
+`openSearcher`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `true`
+|===
++
+Whether to open a new searcher when performing a commit.
+If this is `false`, the commit will flush recent index changes to stable storage, but does not cause a new searcher to be opened to make those changes visible.
+
+If any of the `maxDocs`, `maxTime`, or `maxSize` limits are reached, Solr automatically performs a commit operation.
+The first of these thresholds to be reached will trigger the commit.
+
+If the `autoCommit` tag is missing from `solrconfig.xml`, then only explicit commits will update the index.
+The decision whether to use autoCommit or not depends on the needs of your application.
+
+[source,xml]
+----
+<autoCommit>
+  <maxDocs>10000</maxDocs>
+  <maxTime>30000</maxTime>
+  <maxSize>512m</maxSize>
+  <openSearcher>false</openSearcher>
+</autoCommit>
+----
+
+You can also specify 'soft' autoCommits with the `autoSoftCommit` tag.
+
+[source,xml]
+----
+<autoSoftCommit>
+  <maxTime>60000</maxTime>
+</autoSoftCommit>
+----
+
+=== AutoCommit Best Practices
+
+Determining the best `autoCommit` settings is a tradeoff between performance and accuracy.
+Settings that cause frequent updates will improve the accuracy of searches because new content will be searchable more quickly, but performance may suffer because of the frequent updates.
+Less frequent updates may improve performance but it will take longer for updates to show up in queries.
+
+Here is an example NRT configuration for the two flavors of commit, a hard commit every 60 seconds and a soft commit every 30 seconds.
+Note that these are _not_ the values in the examples shipped with Solr!
+
+[source,xml]
+----
+<autoCommit>
+  <maxTime>${solr.autoCommit.maxTime:60000}</maxTime>
+  <openSearcher>false</openSearcher>
+</autoCommit>
+
+<autoSoftCommit>
+   <maxTime>${solr.autoSoftCommit.maxTime:30000}</maxTime>
+ </autoSoftCommit>
+----
+
+TIP: These parameters can be overridden at run time by defining Java system properties, for example specifying `-Dsolr.autoCommit.maxTime=15000` would override the hard commit interval with a value of 15 seconds.
+
+The choices for `autoCommit` (with `openSearcher=false`) and `autoSoftCommit` have different consequences.
+In the event of un-graceful shutdown, it can take up to the time specified in `autoCommit` for Solr to replay the uncommitted documents from the transaction log.
+
+The time chosen for `autoSoftCommit` determines the maximum time after a document is sent to Solr before it becomes searchable and does not affect the transaction log.
+
+Choose as long an interval as your application can tolerate for this value, often 15-60 seconds is reasonable, or even longer depending on the requirements.
+In situations where the time is set to a very short interval (say 1 second), consider disabling your caches (queryResultCache and filterCache especially) as they will have little utility.
+
+TIP: For extremely high bulk indexing, especially for the initial load if there is no searching, consider turning off `autoSoftCommit` by specifying a value of `-1` for the maxTime parameter.
+
+=== Commit Within a Time Period
+
+An alternative to `autoCommit` is to use `commitWithin`, which can be defined when making the update request to Solr (i.e., when pushing documents), or in an update request handler.
+
+The `commitWithin` settings allow forcing document commits to happen in a defined time period.
+This is used most frequently with <<solrcloud-distributed-requests.adoc#near-real-time-nrt-use-cases,Near Real Time use cases>>, and for that reason the default is to perform a soft commit.
+This does not, however, replicate new documents to follower servers in a user-managed cluster.
+If that's a requirement for your implementation, you can force a hard commit by adding a parameter, as in this example:
+
+[source,xml]
+----
+<commitWithin>
+  <softCommit>false</softCommit>
+</commitWithin>
+----
+
+With this configuration, when you call `commitWithin` as part of your update message, it will automatically perform a hard commit every time.
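+
+When set on the request itself, `commitWithin` is passed as a parameter on the update call; for example, assuming the `techproducts` collection, the following asks Solr to commit the new document within 10 seconds:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/update?commitWithin=10000" \
+  -H 'Content-Type: application/json' \
+  -d '[{"id": "commit-example-2", "name": "Another example document"}]'
+----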
+
+
+== Transaction Log
+
+Transaction logs (tlogs) are a "rolling window" of updates since the last hard commit.
+The current transaction log is closed and a new one opened each time any variety of hard commit occurs.
+Soft commits have no effect on the transaction log.
+
+When tlogs are enabled, documents being added to the index are written to the tlog before the indexing call returns to the client.
+In the event of an un-graceful shutdown (power loss, JVM crash, `kill -9`, etc.) any documents written to the tlog but not yet committed with a hard commit when Solr was stopped are replayed on startup.
+Therefore the data is not lost.
+
+When Solr is shut down gracefully (using the `bin/solr stop` command) Solr will close the tlog file and index segments so no replay will be necessary on startup.
+
+One point of confusion is how much data is contained in a transaction log.
+A tlog does not contain all documents, only the ones since the last hard commit.
+Older transaction log files are deleted when no longer needed.
+
+WARNING: Implicit in the above is that transaction logs will grow forever if hard commits are disabled. Therefore it is important that hard commits be enabled when indexing.
+
+=== Transaction Log Configuration
+
+Transaction logs are required for all SolrCloud clusters, as well as the <<realtime-get.adoc#,RealTime Get>> feature.
+They are configured in the `updateHandler` section of `solrconfig.xml`, in a block like the following:
+
+[source,xml]
+----
+<updateLog>
+  <str name="dir">${solr.ulog.dir:}</str>
+</updateLog>
+----
+
+The only required parameter is:
+
+`dir`::
++
+[%autowidth,frame=none]
+|===
+s|Required |Default: none
+|===
++
+The location of the transaction log.
+In Solr's default `solrconfig.xml` files, this is defined as `${solr.ulog.dir:}`.
++
+The location of the transaction log can be any path, as long as it is defined in `solrconfig.xml` and is readable and writable by Solr.
+
+There are three additional expert-level configuration settings which affect indexing performance and how far a replica can fall behind on updates before it must enter into full recovery.
+These settings would primarily impact SolrCloud cluster configurations:
+
+`numRecordsToKeep`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `100`
+|===
++
+The number of update records to keep per log.
+
+`maxNumLogsToKeep`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `10`
+|===
++
+The maximum number of logs to keep.
+
+`numVersionBuckets`::
++
+[%autowidth,frame=none]
+|===
+|Optional |Default: `65536`
+|===
++
+The number of buckets used to keep track of maximum version values when checking for re-ordered updates.
+Increase this value to reduce the cost of synchronizing access to version buckets during high-volume indexing.
+This requires `(8 bytes (long) * numVersionBuckets)` of heap space per Solr core.
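+For example, with the default of `65536` buckets this works out to roughly 0.5MB of heap per Solr core.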
+
+An example, to be included under `<updateHandler>` in `solrconfig.xml`, employing the above advanced settings:
+
+[source,xml]
+----
+<updateLog>
+  <str name="dir">${solr.ulog.dir:}</str>
+  <int name="numRecordsToKeep">500</int>
+  <int name="maxNumLogsToKeep">20</int>
+  <int name="numVersionBuckets">65536</int>
+</updateLog>
+----
+
+== Event Listeners
+
+The UpdateHandler section is also where update-related event listeners can be configured.
+These can be triggered to occur after any commit (`event="postCommit"`) or only after optimize commands (`event="postOptimize"`).
+
+Users can write custom update event listener classes in Solr plugins.
+As of Solr 7.1, `RunExecutableListener` was removed for security reasons.
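+
+As a sketch, a custom listener is registered with a `<listener>` element inside `<updateHandler>`; here `com.example.MyCommitListener` stands in for a hypothetical plugin class:
+
+[source,xml]
+----
+<updateHandler class="solr.DirectUpdateHandler2">
+  <listener event="postCommit" class="com.example.MyCommitListener"/>
+</updateHandler>
+----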
+
+== Other <updateHandler> Options
+
+In some cases complex updates (such as spatial/shape) may take a very long time to complete.
+In the default configuration other updates that fall into the same internal version bucket will wait indefinitely.
+Eventually these outstanding requests may pile up and lead to thread exhaustion and possibly to OutOfMemory errors.
+
+The parameter `versionBucketLockTimeoutMs` helps to prevent that by specifying a timeout for long-running update requests.
+If this limit is reached, the update will fail, but it won't block all other updates forever.
+
+There is a memory cost associated with this setting.
+Values greater than the default of `0` (unlimited timeout) cause Solr to use a different internal implementation of the version bucket, which increases memory consumption from ~1.5MB to ~6.8MB per Solr core.
+
+An example of specifying this option under `<config>` section of `solrconfig.xml`:
+
+[source,xml]
+----
+<updateHandler class="solr.DirectUpdateHandler2">
+  ...
+  <int name="versionBucketLockTimeoutMs">10000</int>
+</updateHandler>
+----
diff --git a/solr/solr-ref-guide/src/common-query-parameters.adoc b/solr/solr-ref-guide/src/common-query-parameters.adoc
index 08db623..62627a4 100644
--- a/solr/solr-ref-guide/src/common-query-parameters.adoc
+++ b/solr/solr-ref-guide/src/common-query-parameters.adoc
@@ -18,19 +18,23 @@
 
 Several query parsers share supported query parameters.
 
-The following sections describe Solr's common query parameters, which are supported by the <<requesthandlers-and-searchcomponents-in-solrconfig#search-handlers,Search RequestHandlers>>.
+The following sections describe Solr's common query parameters, which are supported by the <<requesthandlers-searchcomponents#search-handlers,Search RequestHandlers>>.
 
 == defType Parameter
 
-The defType parameter selects the query parser that Solr should use to process the main query parameter (`q`) in the request. For example:
+The defType parameter selects the query parser that Solr should use to process the main query parameter (`q`) in the request.
+For example:
 
 `defType=dismax`
 
-If no `defType` parameter is specified, then by default, the <<the-standard-query-parser.adoc#,The Standard Query Parser>> is used. (e.g., `defType=lucene`)
+If no `defType` parameter is specified, then by default, the <<standard-query-parser.adoc#,Standard Query Parser>> is used.
+(e.g., `defType=lucene`)
 
 == sort Parameter
 
-The `sort` parameter arranges search results in either ascending (`asc`) or descending (`desc`) order. The parameter can be used with either numerical or alphabetical content. The directions can be entered in either all lowercase or all uppercase letters (i.e., both `asc` and `ASC` are accepted).
+The `sort` parameter arranges search results in either ascending (`asc`) or descending (`desc`) order.
+The parameter can be used with either numerical or alphabetical content.
+The directions can be entered in either all lowercase or all uppercase letters (i.e., both `asc` and `ASC` are accepted).
 
 Solr can sort query responses according to:
 
@@ -38,16 +42,17 @@ Solr can sort query responses according to:
 * <<function-queries.adoc#sort-by-function,Function results>>
 * The value of any primitive field (numerics, string, boolean, dates, etc.) which has `docValues="true"` (or `multiValued="false"` and `indexed="true"`, in which case the indexed terms will be used to build DocValue like structures on the fly at runtime)
 * A SortableTextField which implicitly uses `docValues="true"` by default to allow sorting on the original input string regardless of the analyzers used for Searching.
-* A single-valued TextField that uses an analyzer (such as the KeywordTokenizer) that produces only a single term per document. TextField does not support `docValues="true"`, but a DocValue-like structure will be built on the fly at runtime.
-** *NOTE:* If you want to be able to sort on a field whose contents you want to tokenize to facilitate searching, <<copying-fields.adoc#,use a `copyField` directive>> in the the Schema to clone the field. Then search on the field and sort on its clone.
+* A single-valued TextField that uses an analyzer (such as the KeywordTokenizer) that produces only a single term per document.
+TextField does not support `docValues="true"`, but a DocValue-like structure will be built on the fly at runtime.
+** *NOTE:* If you want to be able to sort on a field whose contents you want to tokenize to facilitate searching, <<copy-fields.adoc#,use a `copyField` directive>> in the Schema to clone the field.
+Then search on the field and sort on its clone.
 
-In the case of primitive fields, or SortableTextFields, that are `multiValued="true"` the representative value used for each doc when sorting depends on the sort direction: The minimum value in each document is used for ascending (`asc`) sorting, while the maximal value in each document is used for descending (`desc`) sorting.  This default behavior is equivalent to explicitly sorting using the 2 argument `<<function-queries.adoc#field-function,field()>>` function: `sort=field(name,min)  [...]
+In the case of primitive fields, or SortableTextFields, that are `multiValued="true"` the representative value used for each doc when sorting depends on the sort direction: the minimum value in each document is used for ascending (`asc`) sorting, while the maximum value in each document is used for descending (`desc`) sorting.
+This default behavior is equivalent to explicitly sorting using the 2 argument `<<function-queries.adoc#field-function,field()>>` function: `sort=field(name,min) asc` and `sort=field(name,max) desc`
 
 The table below explains how Solr responds to various settings of the `sort` parameter.
 
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
-
-[cols="30,70",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Example |Result
 | |If the sort parameter is omitted, sorting is performed as though the parameter were set to `score desc`.
@@ -63,7 +68,9 @@ Regarding the sort parameter's arguments:
 * A sort ordering must include a field name (or `score` as a pseudo field), followed by whitespace (escaped as + or `%20` in URL strings), followed by a sort direction (`asc` or `desc`).
 
 * Multiple sort orderings can be separated by a comma, using this syntax: `sort=<field name>+<direction>,<field name>+<direction>,...`
-** When more than one sort criteria is provided, the second entry will only be used if the first entry results in a tie. If there is a third entry, it will only be used if the first AND second entries are tied. And so on.
+** When more than one sort criterion is provided, the second entry will only be used if the first entry results in a tie.
+If there is a third entry, it will only be used if the first AND second entries are tied.
+And so on.
 ** If documents tie in all of the explicit sort criteria, Solr uses each document's Lucene document ID as the final tie-breaker.
 This internal property is subject to change during segment merges and document updates, which can lead to unexpected result ordering changes.
 Users looking to avoid this behavior can add an additional sort criteria on a unique or rarely-shared field such as `id` to prevent ties from occurring (e.g., `price desc,id asc`).
@@ -76,11 +83,13 @@ The default value is `0`. In other words, by default, Solr returns results witho
 
 Setting the `start` parameter to some other number, such as `3`, causes Solr to skip over the preceding records and start at the document identified by the offset.
 
-You can use the `start` parameter this way for paging. For example, if the `rows` parameter is set to 10, you could display three successive pages of results by setting start to 0, then re-issuing the same query and setting start to 10, then issuing the query again and setting start to 20.
+You can use the `start` parameter this way for paging.
+For example, if the `rows` parameter is set to 10, you could display three successive pages of results by setting start to 0, then re-issuing the same query and setting start to 10, then issuing the query again and setting start to 20.
 
 == rows Parameter
 
-You can use the `rows` parameter to paginate results from a query. The parameter specifies the maximum number of documents from the complete result set that Solr should return to the client at one time.
+You can use the `rows` parameter to paginate results from a query.
+The parameter specifies the maximum number of documents from the complete result set that Solr should return to the client at one time.
 
 The default value is `10`. That is, by default, Solr returns 10 documents at a time in response to a query.
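+
+For example, assuming the `techproducts` example collection, the third page of ten results could be fetched by combining `start` and `rows`:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/select?q=*:*&start=20&rows=10"
+----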
 
@@ -91,42 +100,54 @@ This parameter defines if this query is cancellable during execution using the
 
 == queryUUID Parameter
 
-For cancellable queries, this allows specifying a custom UUID to identify the query with. If `canCancel` is specified and `queryUUID` is not set, an auto generated UUID will be assigned to the query.
+For cancellable queries, this allows specifying a custom UUID to identify the query with.
+If `canCancel` is specified and `queryUUID` is not set, an auto-generated UUID will be assigned to the query.
 
-If `queryUUID` is specified, this UUID will be used for identifying the query. Note that if using `queryUUID`, the responsibility of ensuring uniqueness of the UUID lies with the caller. If a query UUID
-is reused while the original query UUID is still active, it will cause an exception to be throws for the second query.
+If `queryUUID` is specified, this UUID will be used for identifying the query.
+Note that if using `queryUUID`, the responsibility of ensuring uniqueness of the UUID lies with the caller.
+If a query UUID is reused while the original query UUID is still active, it will cause an exception to be thrown for the second query.
 
-It is recommended that the user either uses all custom UUIDs or depends completely on the system to generate UUID. Mixing the two can lead to conflict of UUIDs.
+It is recommended that the user either use all custom UUIDs or depend completely on the system to generate UUIDs.
+Mixing the two can lead to UUID conflicts.
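+
+For illustration, a cancellable query with a caller-supplied UUID (the collection name and UUID value are placeholders) might be submitted as:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/select?q=*:*&canCancel=true&queryUUID=myQuery123"
+----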
 
 == fq (Filter Query) Parameter
 
-The `fq` parameter defines a query that can be used to restrict the superset of documents that can be returned, without influencing score. It can be very useful for speeding up complex queries, since the queries specified with `fq` are cached independently of the main query. When a later query uses the same filter, there's a cache hit, and filter results are returned quickly from the cache.
+The `fq` parameter defines a query that can be used to restrict the superset of documents that can be returned, without influencing score.
+It can be very useful for speeding up complex queries, since the queries specified with `fq` are cached independently of the main query.
+When a later query uses the same filter, there's a cache hit, and filter results are returned quickly from the cache.
 
 When using the `fq` parameter, keep in mind the following:
 
-* The `fq` parameter can be specified multiple times in a query. Documents will only be included in the result if they are in the intersection of the document sets resulting from each instance of the parameter. In the example below, only documents which have a popularity greater then 10 and have a section of 0 will match.
+* The `fq` parameter can be specified multiple times in a query.
+Documents will only be included in the result if they are in the intersection of the document sets resulting from each instance of the parameter.
+In the example below, only documents which have a popularity greater than 10 and have a section of 0 will match.
 +
 [source,text]
 ----
 fq=popularity:[10 TO *]&fq=section:0
 ----
 
-* Filter queries can involve complicated Boolean queries. The above example could also be written as a single `fq` with two mandatory clauses like so:
+* Filter queries can involve complicated Boolean queries.
+The above example could also be written as a single `fq` with two mandatory clauses like so:
 +
 [source,text]
 ----
 fq=+popularity:[10 TO *] +section:0
 ----
 
-* The document sets from each filter query are cached independently. Thus, concerning the previous examples: use a single `fq` containing two mandatory clauses if those clauses appear together often, and use two separate `fq` parameters if they are relatively independent. (To learn about tuning cache sizes and making sure a filter cache actually exists, see <<the-well-configured-solr-instance.adoc#,The Well-Configured Solr Instance>>.)
-* It is also possible to use <<the-standard-query-parser.adoc#differences-between-lucenes-classic-query-parser-and-solrs-standard-query-parser,filter(condition) syntax>> inside the `fq` to cache clauses individually and - among other things - to achieve union of cached filter queries.
+* The document sets from each filter query are cached independently.
+Thus, concerning the previous examples: use a single `fq` containing two mandatory clauses if those clauses appear together often, and use two separate `fq` parameters if they are relatively independent.
+(To learn about tuning cache sizes and making sure a filter cache actually exists, see <<caches-warming.adoc#caches,Caches>>.)
+* It is also possible to use <<standard-query-parser.adoc#differences-between-lucenes-classic-query-parser-and-solrs-standard-query-parser,filter(condition) syntax>> inside the `fq` to cache clauses individually and - among other things - to achieve union of cached filter queries (see the example following this list).
 
-* As with all parameters: special characters in an URL need to be properly escaped and encoded as hex values. Online tools are available to help you with URL-encoding. For example: http://meyerweb.com/eric/tools/dencoder/.
+* As with all parameters: special characters in a URL need to be properly escaped and encoded as hex values.
+Online tools are available to help you with URL-encoding.
+For example: http://meyerweb.com/eric/tools/dencoder/.
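+
+For example, the following single `fq` (a sketch reusing the fields from the earlier examples) caches each clause independently while matching the union of the two filters:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/select" \
+     --data-urlencode "q=*:*" \
+     --data-urlencode "fq=filter(popularity:[10 TO *]) OR filter(section:0)"
+----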
 
 === cache Local Parameter
 
-Solr caches the results of filter queries by default in the <<query-settings-in-solrconfig.adoc#filtercache,filter cache>>.
-To disable it, use the boolean `cache` <<local-parameters-in-queries.adoc#,local parameter>>, such as `fq={!geofilt cache=false}...`.
+Solr caches the results of filter queries by default in the <<caches-warming.adoc#filter-cache,filter cache>>.
+To disable it, use the boolean `cache` <<local-params.adoc#,local param>>, such as `fq={!geofilt cache=false}...`.
 Do this when you think a query is unlikely to be repeated.
 
 Non-cached filter queries also support the `cost` local parameter to provide a _hint_ as to the order in which they are evaluated.
@@ -157,15 +178,17 @@ fq={!frange cache=false cost=200 l=0}pow(mul(sum(1, query('tag:smartphone')), di
 
 == fl (Field List) Parameter
 
-The `fl` parameter limits the information included in a query response to a specified list of fields. The fields must be either `stored="true"` or `docValues="true"``.`
+The `fl` parameter limits the information included in a query response to a specified list of fields.
+The fields must be either `stored="true"` or `docValues="true"`.
 
-The field list can be specified as a space-separated or comma-separated list of field names. The string "score" can be used to indicate that the score of each document for the particular query should be returned as a field. The wildcard character `*` selects all the fields in the document which are either `stored="true"` or `docValues="true"` and `useDocValuesAsStored="true"` (which is the default when docValues are enabled). You can also add pseudo-fields, functions and transformers to  [...]
+The field list can be specified as a space-separated or comma-separated list of field names.
+The string "score" can be used to indicate that the score of each document for the particular query should be returned as a field.
+The wildcard character `*` selects all the fields in the document which are either `stored="true"` or `docValues="true"` and `useDocValuesAsStored="true"` (which is the default when docValues are enabled).
+You can also add pseudo-fields, functions and transformers to the field list request.
 
 This table shows some basic examples of how to use `fl`:
 
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
-
-[cols="30,70",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Field List |Result
 |id name price |Return only the id, name, and price fields.
@@ -188,7 +211,7 @@ fl=id,title,product(price,popularity)
 
 === Document Transformers with fl
 
-<<transforming-result-documents.adoc#,Document Transformers>> can be used to modify the information returned about each documents in the results of a query:
+<<document-transformers.adoc#,Document Transformers>> can be used to modify the information returned about each document in the results of a query:
 
 [source,text]
 ----
@@ -231,15 +254,18 @@ The `debug` parameter can be specified multiple times and supports the following
 * `debug=timing`: return debug information about how long the query took to process.
 * `debug=results`: return debug information about the score results (also known as "explain").
 ** By default, score explanations are returned as large string values, using newlines and tab indenting for structure & readability, but an additional `debug.explain.structured=true` parameter may be specified to return this information as nested data structures native to the response format requested by `wt`.
-* `debug=all`: return all available debug information about the request request. (alternatively usage: `debug=true`)
+* `debug=all`: return all available debug information about the request.
+An alternative usage is `debug=true`.
 
-For backwards compatibility with older versions of Solr, `debugQuery=true` may instead be specified as an alternative way to indicate `debug=all`
+For backwards compatibility with older versions of Solr, `debugQuery=true` may instead be specified as an alternative way to indicate `debug=all`.
 
 The default behavior is not to include debugging information.
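+
+For example, to see the parsed query and structured scoring explanations for a request (again assuming the `techproducts` example collection):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/select?q=name:ipod&debug=all&debug.explain.structured=true"
+----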
 
 == explainOther Parameter
 
-The `explainOther` parameter specifies a Lucene query in order to identify a set of documents. If this parameter is included and is set to a non-blank value, the query will return debugging information, along with the "explain info" of each document that matches the Lucene query, relative to the main query (which is specified by the `q` parameter). For example:
+The `explainOther` parameter specifies a Lucene query in order to identify a set of documents.
+If this parameter is included and is set to a non-blank value, the query will return debugging information, along with the "explain info" of each document that matches the Lucene query, relative to the main query (which is specified by the `q` parameter).
+For example:
 
 [source,text]
 ----
@@ -252,7 +278,11 @@ The default value of this parameter is blank, which causes no extra "explain inf
 
 == timeAllowed Parameter
 
-This parameter specifies the amount of time, in milliseconds, allowed for a search to complete. If this time expires before the search is complete, any partial results will be returned, but values such as `numFound`, <<faceting.adoc#,facet>> counts, and result <<the-stats-component.adoc#,stats>> may not be accurate for the entire result set. In case of expiration, if `omitHeader` isn't set to `true` the response header contains a special flag called `partialResults`. When using `timeAllo [...]
+This parameter specifies the amount of time, in milliseconds, allowed for a search to complete.
+If this time expires before the search is complete, any partial results will be returned, but values such as `numFound`, <<faceting.adoc#,facet>> counts, and result <<stats-component.adoc#,stats>> may not be accurate for the entire result set.
+In case of expiration, if `omitHeader` isn't set to `true` the response header contains a special flag called `partialResults`.
+When using `timeAllowed` in combination with <<pagination-of-results.adoc#using-cursors,`cursorMark`>>, if the `partialResults` flag is present, some matching documents may have been skipped in the result set.
+Additionally, if the `partialResults` flag is present, `cursorMark` can match `nextCursorMark` even if there may be more results.
 
 [source,json]
 ----
@@ -280,17 +310,19 @@ This value is only checked at the time of:
 . Document collection
 . Doc Values reading
 
-As this check is periodically performed, the actual time for which a request can be processed before it is aborted would be marginally greater than or equal to the value of `timeAllowed`. If the request consumes more time in other stages, custom components, etc., this parameter is not expected to abort the request. Regular search, JSON Facet and the Analytics component abandon requests in accordance with this parameter.
+As this check is periodically performed, the actual time for which a request can be processed before it is aborted would be marginally greater than or equal to the value of `timeAllowed`.
+If the request consumes more time in other stages, custom components, etc., this parameter is not expected to abort the request.
+Regular search, JSON Facet and the Analytics component abandon requests in accordance with this parameter.
 
 == segmentTerminateEarly Parameter
 
 This parameter may be set to either `true` or `false`.
 
-If set to `true`, and if <<indexconfig-in-solrconfig.adoc#mergepolicyfactory,the mergePolicyFactory>> for this collection is a {solr-javadocs}/core/org/apache/solr/index/SortingMergePolicyFactory.html[`SortingMergePolicyFactory`] which uses a `sort` option compatible with <<sort Parameter,the sort parameter>> specified for this query, then Solr will be able to skip documents on a per-segment basis that are definitively not candidates for the current page of results.
+If set to `true`, and if <<index-segments-merging.adoc#mergepolicyfactory,the mergePolicyFactory>> for this collection is a {solr-javadocs}/core/org/apache/solr/index/SortingMergePolicyFactory.html[`SortingMergePolicyFactory`] which uses a `sort` option compatible with <<sort Parameter,the sort parameter>> specified for this query, then Solr will be able to skip documents on a per-segment basis that are definitively not candidates for the current page of results.
 
 If early termination is used, a `segmentTerminatedEarly` header will be included in the `responseHeader`.
 
-Similar to using <<timeAllowed Parameter,the `timeAllowed` Parameter>>, when early segment termination happens values such as `numFound`, <<faceting.adoc#,Facet>> counts, and result <<the-stats-component.adoc#,Stats>> may not be accurate for the entire result set.
+Similar to using <<timeAllowed Parameter,the `timeAllowed` Parameter>>, when early segment termination happens values such as `numFound`, <<faceting.adoc#,Facet>> counts, and result <<stats-component.adoc#,Stats>> may not be accurate for the entire result set.
 
 The default value of this parameter is `false`.
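+
+For example, if the collection's merge policy were configured with `sort="timestamp desc"` (`timestamp` is a hypothetical field used only for illustration), a query could request early termination like this:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/select" \
+     --data-urlencode "q=*:*" \
+     --data-urlencode "sort=timestamp desc" \
+     --data-urlencode "segmentTerminateEarly=true"
+----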
 
@@ -298,17 +330,23 @@ The default value of this parameter is `false`.
 
 This parameter may be set to either `true` or `false`.
 
-If set to `true`, this parameter excludes the header from the returned results. The header contains information about the request, such as the time it took to complete. The default value for this parameter is `false`. When using parameters such as <<common-query-parameters.adoc#timeallowed-parameter,`timeAllowed`>>, and <<solrcloud-query-routing-and-read-tolerance.adoc#shards-tolerant-parameter,`shards.tolerant`>>, which can lead to partial results, it is advisable to keep the header, so [...]
+If set to `true`, this parameter excludes the header from the returned results.
+The header contains information about the request, such as the time it took to complete.
+The default value for this parameter is `false`.
+When using parameters such as <<common-query-parameters.adoc#timeallowed-parameter,`timeAllowed`>>, and <<solrcloud-distributed-requests.adoc#shards-tolerant-parameter,`shards.tolerant`>>, which can lead to partial results, it is advisable to keep the header, so that the `partialResults` flag can be checked, and values such as `numFound`, `nextCursorMark`, <<faceting.adoc#,Facet>> counts, and result <<stats-component.adoc#,Stats>> can be interpreted in the context of partial results.
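+
+For example, to drop the response header from an otherwise ordinary query:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/select?q=*:*&omitHeader=true"
+----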
 
 == wt Parameter
 
-The `wt` parameter selects the Response Writer that Solr should use to format the query's response. For detailed descriptions of Response Writers, see <<response-writers.adoc#,Response Writers>>.
+The `wt` parameter selects the Response Writer that Solr should use to format the query's response.
+For detailed descriptions of Response Writers, see <<response-writers.adoc#,Response Writers>>.
 
 If you do not define the `wt` parameter in your queries, JSON will be returned as the format of the response.
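+
+For example, to request the same results as XML instead of the default JSON:
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/techproducts/select?q=*:*&wt=xml"
+----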
 
 == logParamsList Parameter
 
-By default, Solr logs all parameters of requests. Set this parameter to restrict which parameters of a request are logged. This may help control logging to only those parameters considered important to your organization.
+By default, Solr logs all parameters of requests.
+Set this parameter to restrict which parameters of a request are logged.
+This may help control logging to only those parameters considered important to your organization.
 
 For example, you could define this like:
 
@@ -327,8 +365,11 @@ The `echoParams` parameter controls what information about request parameters is
 The `echoParams` parameter accepts the following values:
 
 * `explicit`: Only parameters included in the actual request will be added to the `params` section of the response header.
-* `all`: Include all request parameters that contributed to the query. This will include everything defined in the request handler definition found in `solrconfig.xml` as well as parameters included with the request, plus the `_` parameter. If a parameter is included in the request handler definition AND the request, it will appear multiple times in the response header.
-* `none`: Entirely removes the `params` section of the response header. No information about the request parameters will be available in the response.
+* `all`: Include all request parameters that contributed to the query.
+This will include everything defined in the request handler definition found in `solrconfig.xml` as well as parameters included with the request, plus the `_` parameter.
+If a parameter is included in the request handler definition AND the request, it will appear multiple times in the response header.
+* `none`: Entirely removes the `params` section of the response header.
+No information about the request parameters will be available in the response.
 
 The default value is `none`, though many `solrconfig.xml` handlers set default to be `explicit`.
 Here is an example of a JSON response where the echoParams parameter was set in that SearchHandler's default,
@@ -382,14 +423,21 @@ This is what happens if a similar request is sent that adds `echoParams=all` to
 ----
 
 == minExactCount Parameter
-When this parameter is used, Solr will count the number of hits accurately at least until this value. After that, Solr can skip over documents that don't have a score high enough to enter in the top N. This can greatly improve performance of search queries. On the other hand, when this parameter is used, the `numFound` may not be exact, and may instead be an approximation.
-The `numFoundExact` boolean attribute is included in all responses, indicating if the `numFound` value is exact or an approximation. If it's an approximation, the real number of hits for the query is guaranteed to be greater or equal `numFound`.
+When this parameter is used, Solr will count the number of hits accurately at least until this value.
+After that, Solr can skip over documents that don't have a score high enough to enter the top N.
+This can greatly improve performance of search queries.
+On the other hand, when this parameter is used, the `numFound` may not be exact, and may instead be an approximation.
+The `numFoundExact` boolean attribute is included in all responses, indicating if the `numFound` value is exact or an approximation.
+If it's an approximation, the real number of hits for the query is guaranteed to be greater than or equal to `numFound`.
 
 More about approximate document counting and `minExactCount`:
 
-* The documents returned in the response are guaranteed to be the docs with the top scores. This parameter will not make Solr skip documents that are to be returned in the response, it will only allow Solr to skip counting docs that, while they match the query, their score is low enough to not be in the top N.
-* Providing `minExactCount` doesn't guarantee that Solr will use approximate hit counting (and thus, provide the speedup). Some types of queries, or other parameters (like if facets are requested) will require accurate counting.
-* Approximate counting can only be used when sorting by `score desc` first (which is the default sort in Solr). Other fields can be used after `score desc`, but if any other type of sorting is used before score, then the approximation won't be applied.
+* The documents returned in the response are guaranteed to be the docs with the top scores.
+This parameter will not make Solr skip documents that are to be returned in the response; it only allows Solr to skip counting docs that match the query but whose scores are too low to place them in the top N.
+* Providing `minExactCount` doesn't guarantee that Solr will use approximate hit counting (and thus, provide the speedup).
+Some types of queries, or other parameters (like if facets are requested) will require accurate counting.
+* Approximate counting can only be used when sorting by `score desc` first (which is the default sort in Solr).
+Other fields can be used after `score desc`, but if any other type of sorting is used before score, then the approximation won't be applied.
 * When doing distributed queries across multiple shards, each shard will accurately count hits until `minExactCount` (which means the query could be hitting `numShards * minExactCount` docs and `numFound` in the response would still be accurate).
 For example:
 
@@ -419,4 +467,5 @@ q=quick brown fox&minExactCount=200&rows=10
     "docs": [{"doc1"}]
 }
 ----
-In this case we know that `163` is the exact number of hits for the query. Both queries must have returned the same number of documents in the top 10.
+In this case we know that `163` is the exact number of hits for the query.
+Both queries must have returned the same number of documents in the top 10.
diff --git a/solr/solr-ref-guide/src/config-api.adoc b/solr/solr-ref-guide/src/config-api.adoc
index 4c70a33..258f308 100644
--- a/solr/solr-ref-guide/src/config-api.adoc
+++ b/solr/solr-ref-guide/src/config-api.adoc
@@ -18,17 +18,22 @@
 
 The Config API enables manipulating various aspects of your `solrconfig.xml` using REST-like API calls.
 
-This feature is enabled by default and works similarly in both SolrCloud and standalone mode. Many commonly edited properties (such as cache sizes and commit settings) and request handler definitions can be changed with this API.
+This feature is enabled by default and works similarly in SolrCloud, user-managed clusters, and single-node installations.
+Many commonly edited properties (such as cache sizes and commit settings) and request handler definitions can be changed with this API.
 
-When using this API, `solrconfig.xml` is not changed. Instead, all edited configuration is stored in a file called `configoverlay.json`. The values in `configoverlay.json` override the values in `solrconfig.xml`.
+When using this API, `solrconfig.xml` is not changed.
+Instead, all edited configuration is stored in a file called `configoverlay.json`.
+The values in `configoverlay.json` override the values in `solrconfig.xml`.
 
 == Config API Endpoints
 
 All Config API endpoints are collection-specific, meaning this API can inspect or modify the configuration for a single collection at a time.
 
-* `_collection_/config`: retrieve the full effective config, or modify the config. Use GET to retrieve and POST for executing commands.
+* `_collection_/config`: retrieve the full effective config, or modify the config.
+Use GET to retrieve and POST for executing commands.
 * `_collection_/config/overlay`: retrieve the details in the `configoverlay.json` only, removing any options defined in `solrconfig.xml` directly or implicitly through defaults.
-* `_collection_/config/params`: create parameter sets that can override or take the place of parameters defined in `solrconfig.xml`. See <<request-parameters-api.adoc#,Request Parameters API>> for more information about this endpoint.
+* `_collection_/config/params`: create parameter sets that can override or take the place of parameters defined in `solrconfig.xml`.
+See <<request-parameters-api.adoc#,Request Parameters API>> for more information about this endpoint.
 
 == Retrieving the Config
 
@@ -60,7 +65,9 @@ http://localhost:8983/api/collections/techproducts/config
 The response will be the Solr configuration resulting from merging settings in `configoverlay.json` with those in `solrconfig.xml`.
 
 
-It's possible to restrict the returned configuration to a top-level section, such as, `query`, `requestHandler` or `updateHandler`. To do this, append the name of the section to the `config` endpoint. For example, to retrieve configuration for all request handlers:
+It's possible to restrict the returned configuration to a top-level section, such as `query`, `requestHandler`, or `updateHandler`.
+To do this, append the name of the section to the `config` endpoint.
+For example, to retrieve configuration for all request handlers:
 
 [.dynamic-tabs]
 --
@@ -85,7 +92,9 @@ http://localhost:8983/api/collections/techproducts/config/requestHandler
 ====
 --
 
-The output will be details of each request handler defined in `solrconfig.xml`, all  <<implicit-requesthandlers.adoc#,defined implicitly>> by Solr, and all defined with this Config API stored in `configoverlay.json`. To see the configuration for implicit request handlers, add `expandParams=true` to the request. See the documentation for the implicit request handlers for examples using this command.
+The output will be details of each request handler defined in `solrconfig.xml`, those <<implicit-requesthandlers.adoc#,defined implicitly>> by Solr, and those defined with this Config API and stored in `configoverlay.json`.
+To see the configuration for implicit request handlers, add `expandParams=true` to the request.
+See the documentation for implicit request handlers linked above for examples using this command.
 
 The available top-level sections that can be added as path parameters are: `query`, `requestHandler`, `searchComponent`, `updateHandler`, `queryResponseWriter`, `initParams`, `znodeVersion`, `listener`, `directoryFactory`, `indexConfig`, and `codecFactory`.
 
@@ -135,9 +144,11 @@ The ability to restrict to objects within a top-level section is limited to requ
 
 == Commands to Modify the Config
 
-This API uses specific commands with POST requests to tell Solr what property or type of property to add to or modify in `configoverlay.json`. The commands are passed with the data to add or modify the property or component.
+This API uses specific commands with POST requests to tell Solr what property or type of property to add to or modify in `configoverlay.json`.
+The commands are passed with the data to add or modify the property or component.
 
-The Config API commands for modifications are categorized into 3 types, each of which manipulate specific data structures in `solrconfig.xml`. These types are:
+The Config API commands for modifications are categorized into three types, each of which manipulates specific data structures in `solrconfig.xml`.
+These types are:
 
 * `set-property` and `unset-property` for <<Commands for Common Properties,Common Properties>>
 * Component-specific `add-`, `update-`, and `delete-` commands for <<Commands for Handlers and Components,Custom Handlers and Local Components>>
@@ -145,16 +156,20 @@ The Config API commands for modifications are categorized into 3 types, each of
 
 === Commands for Common Properties
 
-The common properties are those that are frequently customized in a Solr instance. They are manipulated with two commands:
+The common properties are those that are frequently customized in a Solr instance.
+They are manipulated with two commands:
 
-* `set-property`: Set a well known property. The names of the properties are predefined and fixed. If the property has already been set, this command will overwrite the previous setting.
+* `set-property`: Set a known property.
+The names of the properties are predefined and fixed.
+If the property has already been set, this command will overwrite the previous setting.
 * `unset-property`: Remove a property set using the `set-property` command.
 
-The properties that can be configured with `set-property` and `unset-property` are predefined and listed below. The names of these properties are derived from their XML paths as found in `solrconfig.xml`.
+The properties that can be configured with `set-property` and `unset-property` are predefined and listed below.
+The names of these properties are derived from their XML paths as found in `solrconfig.xml`.
 
 *Update Handler Settings*
 
-See <<updatehandlers-in-solrconfig.adoc#,UpdateHandlers in SolrConfig>> for defaults and acceptable values for these settings.
+See <<commits-transaction-logs.adoc#,Commits and Transaction Logs>> for defaults and acceptable values for these settings.
 
 * `updateHandler.autoCommit.maxDocs`
 * `updateHandler.autoCommit.maxTime`
@@ -166,7 +181,7 @@ See <<updatehandlers-in-solrconfig.adoc#,UpdateHandlers in SolrConfig>> for defa
 
 *Query Settings*
 
-See <<query-settings-in-solrconfig.adoc#,Query Settings in SolrConfig>> for defaults and acceptable values for these settings.
+See <<caches-warming.adoc#,Caches and Query Warming>> for defaults and acceptable values for these settings.
 
 _Caches and Cache Sizes_
 
@@ -210,7 +225,7 @@ See <<circuit-breakers.adoc#,Circuit Breakers in Solr>> for more details
 
 *RequestDispatcher Settings*
 
-See <<requestdispatcher-in-solrconfig.adoc#,RequestDispatcher in SolrConfig>> for defaults and acceptable values for these settings.
+See <<requestdispatcher.adoc#,RequestDispatcher>> for defaults and acceptable values for these settings.
 
 * `requestDispatcher.handleSelect`
 * `requestDispatcher.requestParsers.enableRemoteStreaming`
@@ -321,7 +336,8 @@ curl -X POST -H 'Content-type: application/json' -d '{"unset-property": "updateH
 
 Request handlers, search components, and other types of localized Solr components (such as query parsers, update processors, etc.) can be added, updated and deleted with specific commands for the type of component being modified.
 
-The syntax is similar in each case: `add-<component-name>`, `update-_<component-name>_`, and `delete-<component-name>`. The command name is not case sensitive, so `Add-RequestHandler`, `ADD-REQUESTHANDLER` and `add-requesthandler` are equivalent.
+The syntax is similar in each case: `add-<_component-name_>`, `update-<_component-name_>`, and `delete-<_component-name_>`.
+The command name is not case sensitive, so `Add-RequestHandler`, `ADD-REQUESTHANDLER` and `add-requesthandler` are equivalent.
 
 In each case, `add-` commands add a new configuration to `configoverlay.json`, which will override any other settings for the component in `solrconfig.xml`.
 
@@ -433,7 +449,7 @@ Make a call to the new request handler to check if it is registered:
 
 [source,bash]
 ----
-curl http://localhost:8983/solr/techproducts/mypath?omitHeader=true
+$ curl http://localhost:8983/solr/techproducts/mypath?omitHeader=true
 ----
 
 And you should see the following as output:
@@ -561,14 +577,18 @@ curl -X POST -H 'Content-type:application/json' -d '{
 
 === Commands for User-Defined Properties
 
-Solr lets users templatize the `solrconfig.xml` using the place holder format `${variable_name:default_val}`. You could set the values using system properties, for example, `-Dvariable_name= my_customvalue`. The same can be achieved during runtime using these commands:
+Solr lets users templatize the `solrconfig.xml` using the placeholder format `${variable_name:default_val}`.
+You could set the values using system properties, for example, `-Dvariable_name=my_customvalue`.
+The same can be achieved during runtime using these commands:
 
-* `set-user-property`: Set a user-defined property. If the property has already been set, this command will overwrite the previous setting.
+* `set-user-property`: Set a user-defined property.
+If the property has already been set, this command will overwrite the previous setting.
 * `unset-user-property`: Remove a user-defined property.
 
-The structure of the request is similar to the structure of requests using other commands, in the format of `"command":{"variable_name": "property_value"}`. You can add more than one variable at a time if necessary.
+The structure of the request is similar to the structure of requests using other commands, in the format of `"command":{"variable_name": "property_value"}`.
+You can add more than one variable at a time if necessary.
 
-For more information about user-defined properties, see the section <<configuring-solrconfig-xml.adoc#user-defined-properties-in-core-properties,User defined properties in core.properties>>.
+For more information about user-defined properties, see the section <<property-substitution.adoc#user-defined-properties-in-core-properties,User defined properties in core.properties>>.
 
 See also the section <<Creating and Updating User-Defined Properties>> below for examples of how to use this type of command.
 
@@ -660,9 +680,10 @@ curl -X POST -H 'Content-type:application/json' -d '{"unset-user-property": "var
 ====
 --
 
-=== What about updateRequestProcessorChain?
+=== updateRequestProcessorChain Elements
 
-The Config API does not let you create or edit `updateRequestProcessorChain` elements. However, it is possible to create `updateProcessor` entries and use them by name to create a chain.
+The Config API does not let you create or edit `updateRequestProcessorChain` elements.
+However, it is possible to create `updateProcessor` entries and use them by name to create a chain.
 
 For example:
 
@@ -701,7 +722,8 @@ You can use this directly in your request by adding a parameter in the `updateRe
 
 == How to Map solrconfig.xml Properties to JSON
 
-By using this API, you will be generating JSON representations of properties defined in `solrconfig.xml`. To understand how properties should be represented with the API, let's take a look at a few examples.
+By using this API, you will be generating JSON representations of properties defined in `solrconfig.xml`.
+To understand how properties should be represented with the API, let's take a look at a few examples.
 
 Here is what a request handler looks like in `solrconfig.xml`:
 
@@ -850,14 +872,22 @@ Define the same properties with the Config API:
 
 === Name Components for the Config API
 
-The Config API always allows changing the configuration of any component by name. However, some configurations such as `listener` or `initParams` do not require a name in `solrconfig.xml`. In order to be able to `update` and `delete` of the same item in `configoverlay.json`, the name attribute becomes mandatory.
+The Config API always allows changing the configuration of any component by name.
+However, some configurations such as `listener` or `initParams` do not require a name in `solrconfig.xml`.
+In order to be able to `update` and `delete` the same item in `configoverlay.json`, the `name` attribute becomes mandatory.
 
 
 == How the Config API Works
 
-Every core watches the ZooKeeper directory for the configset being used with that core. In standalone mode, however, there is no watch (because ZooKeeper is not running). If there are multiple cores in the same node using the same configset, only one ZooKeeper watch is used.
+When using SolrCloud, every core watches the ZooKeeper directory for the configset being used with that core.
+If there are multiple cores in the same node using the same configset, only one ZooKeeper watch is used.
 
-For instance, if the configset 'myconf' is used by a core, the node would watch `/configs/myconf`. Every write operation performed through the API would 'touch' the directory and all watchers are notified. Every core would check if the schema file, `solrconfig.xml`, or `configoverlay.json` has been modified by comparing the `znode` versions. If any have been modified, the core is reloaded.
+TIP: In a user-managed cluster or single-node installation, there is no watch (because ZooKeeper is not running).
+
+For instance, if the configset 'myconf' is used by a core, the node would watch `/configs/myconf`.
+Every write operation performed through the API would 'touch' the directory and all watchers are notified.
+Every core would check if the schema file, `solrconfig.xml`, or `configoverlay.json` has been modified by comparing the `znode` versions.
+If any have been modified, the core is reloaded.
 
 If `params.json` is modified, the params object is just updated without a core reload (see <<request-parameters-api.adoc#,Request Parameters API>> for more information about `params.json`).
 
@@ -898,4 +928,6 @@ Any component can register a listener using:
 
 `SolrCore#addConfListener(Runnable listener)`
 
-to get notified for configuration changes. This is not very useful if the files modified result in core reloads (i.e., `configoverlay.xml` or the schema). Components can use this to reload the files they are interested in.
+to get notified of configuration changes.
+This is not very useful if the files modified result in core reloads (i.e., `configoverlay.json` or the schema).
+Components can use this to reload the files they are interested in.
diff --git a/solr/solr-ref-guide/src/config-sets.adoc b/solr/solr-ref-guide/src/config-sets.adoc
index b3c7f15..1c56627 100644
--- a/solr/solr-ref-guide/src/config-sets.adoc
+++ b/solr/solr-ref-guide/src/config-sets.adoc
@@ -18,13 +18,14 @@
 
 Configsets are a set of configuration files used in a Solr installation: `solrconfig.xml`, the schema, and then <<resource-loading.adoc#,resources>> like language files, `synonyms.txt`, and others.
 
-Such configuration, _configsets_, can be named and then referenced by collections or cores, possibly with the intent to share them to avoid duplication.
+Such configuration, _configsets_, can be named and then referenced by collections or cores, allowing you to share them to avoid duplication.
 
-Solr ships with two example configsets located in `server/solr/configsets`, which can be used as a base for your own. These example configsets are named `_default` and `sample_techproducts_configs`.
+Solr ships with two example configsets located in `server/solr/configsets`, which can be used as a base for your own.
+These example configsets are named `_default` and `sample_techproducts_configs`.
 
-== Configsets in Standalone Mode
+== Configsets in User-Managed Clusters or Single-Node Installations
 
-If you are using Solr in standalone mode, configsets are managed on the filesystem.
+If you are using Solr in a user-managed cluster or a single-node installation, configsets are managed on the filesystem.
 
 Each Solr core can have its very own configset located beneath it in a `<instance_dir>/conf/` dir.
 Here, it is not named or shared and the word _configset_ isn't found.
@@ -48,9 +49,11 @@ The structure should look something like this:
             /solrconfig.xml
 ----
 
-The default base directory is `$SOLR_HOME/configsets`. This path can be configured in `solr.xml` (see <<format-of-solr-xml.adoc#,Format of solr.xml>> for details).
+The default base directory is `$SOLR_HOME/configsets`.
+This path can be configured with the `configSetBaseDir` parameter in `solr.xml` (see <<configuring-solr-xml.adoc#,Configuring solr.xml>> for details).
 
-To create a new core using a configset, pass `configSet` as one of the core properties. For example, if you do this via the CoreAdmin API:
+To create a new core using a configset, pass `configSet` as one of the core properties.
+For example, if you do this via the CoreAdmin API:
 
 [.dynamic-tabs]
 --
@@ -81,18 +84,19 @@ curl -v -X POST -H 'Content-type: application/json' -d '{
 ====
 --
 
-== Configsets in SolrCloud Mode
+== Configsets in SolrCloud Clusters
 
-In SolrCloud, it's critical to understand that configsets are fundamentally stored in ZooKeeper _and not_ the file system.
+In SolrCloud, it's critical to understand that configsets are stored in ZooKeeper _and not_ the file system.
 Solr's `_default` configset is uploaded to ZooKeeper on initialization.
-This and some demonstration ones remain on the file system but Solr does not use them whatsoever in this mode.
+This and a couple of example configsets remain on the file system, but Solr does not use them unless they are specified when creating a new collection.
 
-When you create a collection in SolrCloud, you can specify a named configset -- possibly shared.
-If you don't, then the `_default` will be copied and given a unique name for use by this collection.
+When you create a collection in SolrCloud, you can specify a named configset.
+If you don't, then the `_default` will be copied and given a unique name for use by the new collection.
 
 A configset can be uploaded to ZooKeeper either via the <<configsets-api.adoc#,Configsets API>> or more directly via <<solr-control-script-reference.adoc#upload-a-configuration-set,`bin/solr zk upconfig`>>.
 The Configsets API has some other operations as well, and likewise, so does the CLI.
 
 To upload a file to a configset already stored on ZooKeeper, you can use <<solr-control-script-reference.adoc#copy-between-local-files-and-zookeeper-znodes,`bin/solr zk cp`>>.
 
-CAUTION: By default, ZooKeeper's file size limit is 1MB. If your files are larger than this, you'll need to either <<setting-up-an-external-zookeeper-ensemble.adoc#increasing-the-file-size-limit,increase the ZooKeeper file size limit>> or store them instead <<libs.adoc#lib-directives-in-solrconfig,on the filesystem>>.
+CAUTION: By default, ZooKeeper's file size limit is 1MB.
+If your files are larger than this, you'll need to either <<zookeeper-ensemble.adoc#increasing-the-file-size-limit,increase the ZooKeeper file size limit>> or store them <<libs.adoc#lib-directives-in-solrconfig,on the filesystem>>.
diff --git a/solr/solr-ref-guide/src/configsets-api.adoc b/solr/solr-ref-guide/src/configsets-api.adoc
index 141d6d6..c0d9a9c 100644
--- a/solr/solr-ref-guide/src/configsets-api.adoc
+++ b/solr/solr-ref-guide/src/configsets-api.adoc
@@ -19,17 +19,22 @@
 
 The Configsets API enables you to upload new configsets to ZooKeeper, create, and delete configsets when Solr is running SolrCloud mode.
 
-Configsets are a collection of configuration files such as `solrconfig.xml`, `synonyms.txt`, the schema, language-specific files, and other collection-level configuration files (everything that normally lives in the `conf` directory). Solr ships with two example configsets (`_default` and `sample_techproducts_configs`) which can be used when creating collections. Using the same concept, you can create your own configsets and make them available when creating collections.
+Configsets are collections of configuration files such as `solrconfig.xml`, `synonyms.txt`, the schema, language-specific files, and other collection-level configuration files (everything that normally lives in the `conf` directory).
+Solr ships with two example configsets (`_default` and `sample_techproducts_configs`) which can be used when creating collections.
+Using the same concept, you can create your own configsets and make them available when creating collections.
 
 This API provides a way to upload configuration files to ZooKeeper and share the same set of configuration files between two or more collections.
 
 Once a configset has been uploaded to ZooKeeper, use the configset name when creating the collection with the <<collections-api.adoc#,Collections API>> and the collection will use your configuration files.
 
-Configsets do not have to be shared between collections if they are uploaded with this API, but this API makes it easier to do so if you wish. An alternative to uploading your configsets in advance would be to put the configuration files into a directory under `server/solr/configsets` and using the directory name as the `-d` parameter when using `bin/solr create` to create a collection.
+Configsets do not have to be shared between collections if they are uploaded with this API, but this API makes it easier to do so if you wish.
+An alternative to uploading your configsets in advance would be to put the configuration files into a directory under `server/solr/configsets` and use the directory name as the `-d` parameter when using `bin/solr create` to create a collection.
 
-NOTE: This API can only be used with Solr running in SolrCloud mode. If you are not running Solr in SolrCloud mode but would still like to use shared configurations, please see the section <<config-sets.adoc#,Configsets>>.
+NOTE: This API can only be used with Solr running in SolrCloud mode.
+If you are not running Solr in SolrCloud mode but would still like to use shared configurations, please see the section <<config-sets.adoc#,Configsets>>.
 
-The API works by passing commands to the `configs` endpoint. The path to the endpoint varies depending on the API being used: the v1 API uses `solr/admin/configs`, while the v2 API uses `api/cluster/configs`. Examples of both types are provided below.
+The API works by passing commands to the `configs` endpoint.
+The path to the endpoint varies depending on the API being used: the v1 API uses `solr/admin/configs`, while the v2 API uses `api/cluster/configs`.
+Examples of both types are provided below.
 
 [[configsets-list]]
 == List Configsets
@@ -83,20 +88,25 @@ The output will look like:
 Upload a configset, which is sent as a zipped file.
 A single, non-zipped file can also be uploaded with the `filePath` parameter.
 
-This functionality is enabled by default, but can be disabled via a runtime parameter `-Dconfigset.upload.enabled=false`. Disabling this feature is advisable if you want to expose Solr installation to untrusted users (even though you should never do that!).
+This functionality is enabled by default, but can be disabled via a runtime parameter `-Dconfigset.upload.enabled=false`.
+Disabling this feature is advisable if you want to expose your Solr installation to untrusted users (even though you should never do that!).
 
-A configset is uploaded in a "trusted" mode if authentication is enabled and the upload operation is performed as an authenticated request. Without authentication, a configset is uploaded in an "untrusted" mode. Upon creation of a collection using an "untrusted" configset, the following functionality will not work:
+A configset is uploaded in a "trusted" mode if authentication is enabled and the upload operation is performed as an authenticated request.
+Without authentication, a configset is uploaded in an "untrusted" mode.
+Upon creation of a collection using an "untrusted" configset, the following functionality will not work:
 
 * The XSLT transformer (`tr` parameter) cannot be used at request processing time.
 * If specified in the configset, the ScriptUpdateProcessorFactory will not initialize.
-* Collections won't initialize if <lib> directives are used in the configset. (Note: Libraries added to Solr's classpath don't need the <lib> directive)
+* Collections won't initialize if <lib> directives are used in the configset.
+(Note: Libraries added to Solr's classpath don't need the <lib> directive.)
 
 If you use any of these parameters or features, you must have enabled security features in your Solr installation and you must upload the configset as an authenticated user.
 
 The `upload` command takes the following parameters:
 
 `name`::
-The configset to be created when the upload is complete. This parameter is required.
+The configset to be created when the upload is complete.
+This parameter is required.
 
 `overwrite`::
 If set to `true`, Solr will overwrite an existing configset with the same name (if false, the request will fail).
@@ -104,7 +114,8 @@ If `filePath` is provided, then this option specifies whether the specified file
 Default is `false` when using the v1 API, but `true` when using the v2 API.
 
 `cleanup`::
-When overwriting an existing configset (`overwrite=true`), this parameter tells Solr to delete the files in ZooKeeper that existed in the old configset but not in the one being uploaded. Default is `false`.
+When overwriting an existing configset (`overwrite=true`), this parameter tells Solr to delete the files in ZooKeeper that existed in the old configset but not in the one being uploaded.
+Default is `false`.
 This parameter cannot be set to true when `filePath` is used.
 
 `filePath`::
@@ -112,7 +123,8 @@ This parameter allows the uploading of a single, non-zipped file to the given pa
 This functionality respects the `overwrite` parameter, so a request will fail if the given file path already exists in the configset and overwrite is set to `false`.
 The `cleanup` parameter cannot be set to true when `filePath` is used.
 
-If uploading an entire configset, the body of the request should be a zip file that contains the configset. The zip file must be created from within the `conf` directory (i.e., `solrconfig.xml` must be the top level entry in the zip file).
+If uploading an entire configset, the body of the request should be a zip file that contains the configset.
+The zip file must be created from within the `conf` directory (i.e., `solrconfig.xml` must be the top level entry in the zip file).
 
 Here is an example on how to create the zip file named "myconfig.zip" and upload it as a configset named "myConfigSet":
 
@@ -207,10 +219,12 @@ If you have not yet uploaded any configsets, see the <<Upload a Configset>> comm
 The following parameters are supported when creating a configset.
 
 `name`::
-The configset to be created. This parameter is required.
+The configset to be created.
+This parameter is required.
 
 `baseConfigSet`::
-The name of the configset to copy as a base. This defaults to `_default`
+The name of the configset to copy as a base.
+This defaults to `_default`.
 
 `configSetProp._property_=_value_`::
 A configset property from the base configset to override in the copied configset.
@@ -278,10 +292,12 @@ curl -X POST -H 'Content-type: application/json' -d '{
 [[configsets-delete]]
 == Delete a Configset
 
-The `delete` command removes a configset. It does not remove any collections that were created with the configset.
+The `delete` command removes a configset.
+It does not remove any collections that were created with the configset.
 
 `name`::
-The configset to be deleted. This parameter is required.
+The configset to be deleted.
+This parameter is required.
 
 To delete a configset named "myConfigSet":
 
@@ -291,7 +307,8 @@ To delete a configset named "myConfigSet":
 ====
 [.tab-label]*V1 API*
 
-With the v1 API, the `delete` command must be capitalized as `DELETE`. The name of the configset to delete is provided with the `name` parameter:
+With the v1 API, the `delete` command must be capitalized as `DELETE`.
+The name of the configset to delete is provided with the `name` parameter:
 
 [source,bash]
 ----
@@ -303,7 +320,8 @@ http://localhost:8983/solr/admin/configs?action=DELETE&name=myConfigSet&omitHead
 ====
 [.tab-label]*V2 API*
 
-With the v2 API, the `delete` command is provided as the request method, as in `-X DELETE`. The name of the configset to delete is provided as a path parameter:
+With the v2 API, the `delete` command is provided as the request method, as in `-X DELETE`.
+The name of the configset to delete is provided as a path parameter:
 
 [source,bash]
 ----
diff --git a/solr/solr-ref-guide/src/configuration-apis.adoc b/solr/solr-ref-guide/src/configuration-apis.adoc
index d3a0eeb..ef11bcf 100644
--- a/solr/solr-ref-guide/src/configuration-apis.adoc
+++ b/solr/solr-ref-guide/src/configuration-apis.adoc
@@ -1,5 +1,11 @@
 = Configuration APIs
-:page-children:  config-api, request-parameters-api, managed-resources
+:page-children:  config-api, \
+    request-parameters-api, \
+    managed-resources, \
+    collections-api, \
+    configsets-api, \
+    coreadmin-api, \
+    v2-api
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -19,6 +25,19 @@
 
 Solr includes several APIs that can be used to modify settings in `solrconfig.xml`.
 
-* <<config-api.adoc#,Config API>>
-* <<request-parameters-api.adoc#,Request Parameters API>>
-* <<managed-resources.adoc#,Managed Resources>>
+****
+// This tags the below list so it can be used in the parent page section list
+// tag::configapi-sections[]
+[cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<config-api.adoc#,Config API>>: Configure `solrconfig.xml`.
+| <<request-parameters-api.adoc#,Request Parameters API>>: Override parameters in `solrconfig.xml`.
+| <<managed-resources.adoc#,Managed Resources>>: Programmatic control over resource files.
+| <<collections-api.adoc#,Collections API>>: Manage SolrCloud from cores to nodes.
+| <<configsets-api.adoc#,Configsets API>>: Manage configsets.
+| <<coreadmin-api.adoc#,CoreAdmin API>>: Manage Cores.
+| <<v2-api.adoc#,V2 API>>: The v2 API structure.
+|
+|===
+// end::configapi-sections[]
+****
diff --git a/solr/solr-ref-guide/src/configuration-files.adoc b/solr/solr-ref-guide/src/configuration-files.adoc
new file mode 100644
index 0000000..f0781c7
--- /dev/null
+++ b/solr/solr-ref-guide/src/configuration-files.adoc
@@ -0,0 +1,107 @@
+= Solr Configuration Files
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+Solr has several configuration files that you will interact with during your implementation.
+
+Many of these files are in XML format, although APIs that interact with configuration settings tend to accept JSON for programmatic access as needed.
+
+== Solr Home
+When Solr runs, it needs access to a home directory.
+
+When you first install Solr, your home directory is `server/solr`.
+However, some examples may change this location (for example, if you run `bin/solr start -e cloud`, your home directory will be `example/cloud`).
+
+The home directory contains important configuration information and is the place where Solr will store its index.
+The layout of the home directory will look a little different when you are running Solr in a user-managed cluster or single-node installation vs. when you are running a SolrCloud cluster.
+
+The crucial parts of the Solr home directory are shown in these examples:
+
+.User-Managed Cluster or Single-Node
+[source,plain]
+----
+<solr-home-directory>/
+   solr.xml
+   core_name1/
+      core.properties
+      conf/
+         solrconfig.xml
+         managed-schema
+      data/
+   core_name2/
+      core.properties
+      conf/
+         solrconfig.xml
+         managed-schema
+      data/
+----
+
+.SolrCloud
+[source,plain]
+----
+<solr-home-directory>/
+   solr.xml
+   core_name1/
+      core.properties
+      data/
+   core_name2/
+      core.properties
+      data/
+----
+
+You may see other files, but the main ones you need to know are discussed below.
+
+== Solr's Configuration Files
+Inside Solr's Home, you'll find these files:
+
+* `solr.xml` specifies configuration options for your Solr server instance.
+For more information on `solr.xml`, see <<configuring-solr-xml.adoc#,Configuring solr.xml>>.
+* Per Solr Core:
+** `core.properties` defines specific properties for each core such as its name, the collection the core belongs to, the location of the schema, and other parameters.
+For more details on `core.properties`, see the section <<core-discovery.adoc#,Core Discovery>>.
+** `solrconfig.xml` controls high-level behavior.
+You can, for example, specify an alternate location for the data directory.
+For more information on `solrconfig.xml`, see <<configuring-solrconfig-xml.adoc#,Configuring solrconfig.xml>>.
+** `managed-schema` (or `schema.xml`) describes the documents you will ask Solr to index.
+The schema defines a document as a collection of fields.
+You can define both the field types and the fields themselves.
+Field type definitions are powerful and include information about how Solr processes incoming field values and query values.
+For more information on Solr schemas, see <<solr-schema.adoc#,Solr Schema>>.
+** `data/` contains index files.
+
+Note that the SolrCloud example does not include a `conf` directory for each Solr Core (so there is no `solrconfig.xml` or schema file).
+This is because the configuration files usually found in the `conf` directory are stored in ZooKeeper so they can be propagated across the cluster.
+
+If you are using SolrCloud with the embedded ZooKeeper instance, you may also see `zoo.cfg` and `zoo.data`, which are ZooKeeper configuration and data files.
+However, if you are running your own ZooKeeper ensemble, you would supply your own ZooKeeper configuration file when you start it and the copies in Solr would be unused.
+
+== Files Screen
+
+The Files screen in the Admin UI lets you browse & view configuration files (such as `solrconfig.xml` and the schema file) for the collection you selected.
+
+.The Files Screen
+image::images/configuration-files/files-screen.png[Files screen,height=400]
+
+If you are using <<cluster-types.adoc#solrcloud-mode,SolrCloud>>, the files displayed are the configuration files for this collection stored in ZooKeeper.
+In user-managed clusters or single-node installations, all files in the `conf` directory are displayed.
+
+The configuration files shown may or may not be used by the collection, as their use depends on how they are referenced in either `solrconfig.xml` or your schema.
+
+Configuration files cannot be edited with this screen, so a text editor of some kind must be used.
+
+This screen is related to the <<schema-browser-screen.adoc#,Schema Browser Screen>>, in that they both can display information from the schema.
+However, the Schema Browser provides a way to drill into the analysis chain and displays linkages between field types, fields, and dynamic field rules.
diff --git a/solr/solr-ref-guide/src/configuration-guide.adoc b/solr/solr-ref-guide/src/configuration-guide.adoc
new file mode 100644
index 0000000..d0c4e12
--- /dev/null
+++ b/solr/solr-ref-guide/src/configuration-guide.adoc
@@ -0,0 +1,74 @@
+= Configuration Guide
+:page-children: configuration-files, \
+    property-substitution, \
+    core-discovery, \
+    configuring-solr-xml, \
+    configuring-solrconfig-xml, \
+    configuration-apis, \
+    config-sets, \
+    resource-loading, \
+    solr-plugins
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+[.lead]
+This section covers configuration files and options for customizing your Solr installation.
+
+****
+[discrete]
+=== Configuration Overview
+
+[cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<configuration-files.adoc#,Solr Configuration Files>>: Solr's major configuration files.
+| <<property-substitution.adoc#,Property Substitution in Configuration Files>>: Provide property values at startup or in shared property files.
+| <<core-discovery.adoc#,Core Discovery>>: Placement of `core.properties` and available property options.
+| <<config-sets.adoc#,Configsets>>: Use configsets to avoid duplicating effort when defining a new core.
+| <<resource-loading.adoc#,Resource Loading>>: Resolving word lists, model files, and related data.
+| <<configuring-solr-xml.adoc#,Configuring solr.xml>>: Global configuration options.
+|===
+****
+
+****
+[discrete]
+=== solrconfig.xml
+
+<<configuring-solrconfig-xml.adoc#,*Configuring solrconfig.xml*>>
+
+// This pulls the sub-section list from the child page to reduce errors
+include::configuring-solrconfig-xml.adoc[tag=solrconfig-sections]
+****
+
+****
+[discrete]
+=== Configuration APIs
+
+<<configuration-apis.adoc#,*Configuration APIs*>>
+
+// This pulls the sub-section list from the child page to reduce errors
+include::configuration-apis.adoc[tag=configapi-sections]
+****
+
+****
+[discrete]
+=== Solr Plugins
+
+<<solr-plugins.adoc#,*Solr Plugins*>>
+
+// This pulls the sub-section list from the child page to reduce errors
+include::solr-plugins.adoc[tag=plugin-sections]
+****
diff --git a/solr/solr-ref-guide/src/configuring-logging.adoc b/solr/solr-ref-guide/src/configuring-logging.adoc
index 8d50cb8..8607710 100644
--- a/solr/solr-ref-guide/src/configuring-logging.adoc
+++ b/solr/solr-ref-guide/src/configuring-logging.adoc
@@ -16,50 +16,65 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr logs are a key way to know what's happening in the system. There are several ways to adjust the default logging configuration.
-
-[IMPORTANT]
-====
-In addition to the logging options described below, there is a way to configure which request parameters (such as parameters sent as part of queries) are logged with an additional request parameter called `logParamsList`. See the section on <<common-query-parameters.adoc#logparamslist-parameter,Common Query Parameters>> for more information.
-====
+Solr logs are a key way to know what's happening in the system.
+There are several ways to adjust the default logging configuration.
 
 == Temporary Logging Settings
 
-You can control the amount of logging output in Solr by using the Admin Web interface. Select the *LOGGING* link. Note that this page only lets you change settings in the running system and is not saved for the next run. (For more information about the Admin Web interface, see <<using-the-solr-administration-user-interface.adoc#,Using the Solr Administration User Interface>>.)
+There are several ways to temporarily change log levels when needed.
+
+=== Logging Screen
+You can temporarily change the amount of logging output by Solr using the Admin UI.
+Select the *Logging* link in the left-hand menu.
+
+Note that log levels set here are not persisted and will be reset on the next Solr restart.
 
 .The Logging Screen
-image::images/logging/logging.png[image]
+image::images/configuring-logging/logging.png[Logging Screen]
 
-This part of the Admin Web interface allows you to set the logging level for many different log categories. Fortunately, any categories that are *unset* will have the logging level of its parent. This makes it possible to change many categories at once by adjusting the logging level of their parent.
+This part of the Admin Web interface allows you to set the logging level for many different log categories.
+Fortunately, any categories that are *unset* will inherit the logging level of their parent.
+This makes it possible to change many categories at once by adjusting the logging level of their parent.
 
-When you select **Level**, you see the following menu:
+When you select menu:Logging[Level], you see the following menu:
 
 .The Log Level Menu
-image::images/logging/level_menu.png[image,width=1159,height=577]
+image::images/configuring-logging/level_menu.png[image,width=1159,height=577]
+
+Solr classes are shown in the left column in a directory tree structure representing the classpath.
+The current level is shown in the right column.
+
+Directories are shown with their current logging levels.
+A row highlighted in yellow indicates that the class currently has logging enabled.
+To set a log level for a particular directory, click the current level in the right column and the Log Level Menu will appear, floating over the tree.
+Select the button next to your desired log level.
 
-Directories are shown with their current logging levels. The Log Level Menu floats over these. To set a log level for a particular directory, select it and click the appropriate log level button.
 The log level change will be distributed to all nodes in the cluster.
 
-Log levels settings are as follows:
+The possible log levels are as follows:
 
 [width="100%",options="header",]
 |===
 |Level |Result
-|FINEST |Reports everything.
-|FINE |Reports everything but the least important messages.
-|CONFIG |Reports configuration errors.
+|ALL |Reports everything.
+|TRACE |Reports everything but the least important messages.
+|DEBUG |Reports detailed debugging information.
 |INFO |Reports everything but normal status.
 |WARN |Reports all warnings.
-|SEVERE |Reports only the most severe warnings.
+|ERROR |Reports only errors and more severe events.
+|FATAL |Reports only fatal events.
 |OFF |Turns off logging.
 |UNSET |Removes the previous log setting.
 |===
 
 Multiple settings at one time are allowed.
 
-=== Loglevel API
+=== Log Level API
+
+There is also a way of sending REST commands to the `admin/info/logging` endpoint to do the same.
 
-There is also a way of sending REST commands to the logging endpoint to do the same. Example:
+Example:
 
 [source,bash]
 ----
@@ -67,13 +82,16 @@ There is also a way of sending REST commands to the logging endpoint to do the s
 curl -s http://localhost:8983/solr/admin/info/logging --data-binary "set=root:WARN"
 ----
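+
+To change the level for a specific logger category instead of the root logger, the same endpoint accepts a category name (a sketch; substitute the category you want to adjust):
+
+[source,bash]
+----
+curl -s http://localhost:8983/solr/admin/info/logging --data-binary "set=org.apache.solr.core.SolrCore:DEBUG"
+----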
 
-== Choosing Log Level at Startup
+=== Choosing Log Level at Startup
 
-You can temporarily choose a different logging level as you start Solr. There are two ways:
+You can temporarily choose a different logging level as you start Solr.
+There are two ways:
 
-The first way is to set the `SOLR_LOG_LEVEL` environment variable before you start Solr, or place the same variable in `bin/solr.in.sh` or `bin/solr.in.cmd`. The variable must contain an uppercase string with a supported log level (see above).
+The first way is to set the `SOLR_LOG_LEVEL` environment variable before you start Solr, or place the same variable in `bin/solr.in.sh` or `bin/solr.in.cmd`.
+The variable must contain an uppercase string with a supported log level (see above).
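+
+For example, to run with more verbose logging for a single session (a sketch; this assumes `SOLR_LOG_LEVEL` is not also set in `bin/solr.in.sh`):
+
+[source,bash]
+----
+SOLR_LOG_LEVEL=DEBUG bin/solr start
+----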
 
-The second way is to start Solr with the -v or -q options, see <<solr-control-script-reference.adoc#,Solr Control Script Reference>> for details. Examples:
+The second way is to start Solr with the `-v` or `-q` options; see <<solr-control-script-reference.adoc#,Solr Control Script Reference>> for details.
+Examples:
 
 [source,bash]
 ----
@@ -85,11 +103,16 @@ bin/solr start -f -q
 
 == Permanent Logging Settings
 
-Solr uses http://logging.apache.org/log4j/log4j-{ivy-log4j-version}/[Log4J version {ivy-log4j-version}] for logging which is configured using `server/resources/log4j2.xml`. Take a moment to inspect the contents of the `log4j2.xml` file so that you are familiar with its structure. By default, Solr log messages will be written to `SOLR_LOGS_DIR/solr.log`.
+Solr uses http://logging.apache.org/log4j/log4j-{ivy-log4j-version}/[Log4J version {ivy-log4j-version}] for logging which is configured using `server/resources/log4j2.xml`.
+Take a moment to inspect the contents of the `log4j2.xml` file so that you are familiar with its structure.
+By default, Solr log messages will be written to `SOLR_LOGS_DIR/solr.log`.
 
-When you're ready to deploy Solr in production, set the variable `SOLR_LOGS_DIR` to the location where you want Solr to write log files, such as `/var/solr/logs`. You may also want to tweak `log4j2.xml`. Note that if you installed Solr as a service using the instructions provided in <<taking-solr-to-production.adoc#,Taking Solr to Production>>, then see `/var/solr/log4j2.xml` instead of the default `server/resources` version.
+When you're ready to deploy Solr in production, set the variable `SOLR_LOGS_DIR` to the location where you want Solr to write log files, such as `/var/solr/logs`.
+You may also want to tweak `log4j2.xml`.
+Note that if you installed Solr as a service using the instructions provided in <<taking-solr-to-production.adoc#,Taking Solr to Production>>, then see `/var/solr/log4j2.xml` instead of the default `server/resources` version.
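+
+For example, a sketch of the relevant line in `bin/solr.in.sh` (adjust the path for your environment):
+
+[source,bash]
+----
+SOLR_LOGS_DIR=/var/solr/logs
+----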
 
-When starting Solr in the foreground (`-f` option), all logs will be sent to the console, in addition to `solr.log`. When starting Solr in the background, it will write all `stdout` and `stderr` output to a log file in `solr-<port>-console.log`, and automatically disable the CONSOLE logger configured in `log4j2.xml`, having the same effect as if you removed the CONSOLE appender from the rootLogger manually.
+When starting Solr in the foreground (`-f` option), all logs will be sent to the console, in addition to `solr.log`.
+When starting Solr in the background, it will write all `stdout` and `stderr` output to a log file in `solr-<port>-console.log`, and automatically disable the CONSOLE logger configured in `log4j2.xml`, having the same effect as if you removed the CONSOLE appender from the rootLogger manually.
 
 Also, in `log4j2.xml` if the default log rotation size threshold of 32MB is too small for production servers then you should increase it to a larger value (such as 100MB or more).
 
@@ -100,19 +123,29 @@ Also, in `log4j2.xml` if the default log rotation size threshold of 32MB is too
 
 Java Garbage Collection logs are rotated by the JVM when size hits 20M, for a max of 9 generations.
 
-On every startup or restart of Solr, log4j2 performs log rotation. If you choose to use another log framework that does not support rotation on startup, you may enable `SOLR_LOG_PRESTART_ROTATION` in `bin/solr.in.sh` or `bin/solr.in.cmd` to let the start script rotate the logs on startup.
+On every startup or restart of Solr, log4j2 performs log rotation.
+If you choose to use another log framework that does not support rotation on startup, you may enable `SOLR_LOG_PRESTART_ROTATION` in `bin/solr.in.sh` or `bin/solr.in.cmd` to let the start script rotate the logs on startup.
 
 == Logging Slow Queries
 
-For high-volume search applications, logging every query can generate a large amount of logs and, depending on the volume, potentially impact performance. If you mine these logs for additional insights into your application, then logging every query request may be useful.
+For high-volume search applications, logging every query can generate a large amount of logs and, depending on the volume, potentially impact performance.
+If you mine these logs for additional insights into your application, then logging every query request may be useful.
 
-On the other hand, if you're only concerned about warnings and error messages related to requests, then you can set the log verbosity to WARN. However, this poses a potential problem in that you won't know if any queries are slow, as slow queries are still logged at the INFO level.
+On the other hand, if you're only concerned about warnings and error messages related to requests, then you can set the log verbosity to WARN.
+However, this poses a potential problem in that you won't know if any queries are slow, as slow queries are still logged at the INFO level.
 
-Solr provides a way to set your log verbosity threshold to WARN and be able to set a latency threshold above which a request is considered "slow" and log that request at the WARN level to help you identify slow queries in your application. To enable this behavior, configure the `<slowQueryThresholdMillis>` element in the *query* section of `solrconfig.xml`:
+Solr provides a way to set your log verbosity threshold to WARN and be able to set a latency threshold above which a request is considered "slow" and log that request at the WARN level to help you identify slow queries in your application.
+To enable this behavior, configure the `<slowQueryThresholdMillis>` element in the *query* section of `solrconfig.xml`:
 
 [source,xml]
 ----
 <slowQueryThresholdMillis>1000</slowQueryThresholdMillis>
 ----
 
-Any queries that take longer than the specified threshold will be logged as "slow" queries at the WARN level. The log file under which you can find all these queries is called `solr_slow_requests.log` and will be found in your `SOLR_LOGS_DIR` (see <<Permanent Logging Settings>> for more about defining log locations).
+Any queries that take longer than the specified threshold will be logged as "slow" queries at the WARN level.
+The log file under which you can find all these queries is called `solr_slow_requests.log` and will be found in your `SOLR_LOGS_DIR` (see <<Permanent Logging Settings>> for more about defining log locations).
+
+== Logging Select Request Parameters
+
+In addition to the logging options described above, it's possible to log only a selected list of request parameters (such as those sent with queries) with an additional request parameter called `logParamsList`.
+See the section on <<common-query-parameters.adoc#logparamslist-parameter,logParamsList Parameter>> for more information.
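+
+For example, the following request (assuming the `techproducts` example collection) logs only the `q` and `fq` parameters for this query:
+
+[source,bash]
+----
+curl 'http://localhost:8983/solr/techproducts/select?q=memory&fq=inStock:true&logParamsList=q,fq'
+----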
diff --git a/solr/solr-ref-guide/src/format-of-solr-xml.adoc b/solr/solr-ref-guide/src/configuring-solr-xml.adoc
similarity index 65%
rename from solr/solr-ref-guide/src/format-of-solr-xml.adoc
rename to solr/solr-ref-guide/src/configuring-solr-xml.adoc
index 1d9a1ae..0e0f74a 100644
--- a/solr/solr-ref-guide/src/format-of-solr-xml.adoc
+++ b/solr/solr-ref-guide/src/configuring-solr-xml.adoc
@@ -1,4 +1,4 @@
-= Format of solr.xml
+= Configuring solr.xml
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -18,11 +18,13 @@
 
 The `solr.xml` file defines some global configuration options that apply to all or many cores.
 
-This section will describe the default `solr.xml` file included with Solr and how to modify it for your needs. For details on how to configure `core.properties`, see the section <<defining-core-properties.adoc#,Defining core.properties>>.
+This section will describe the default `solr.xml` file included with Solr and how to modify it for your needs.
+For details on how to configure `core.properties`, see the section <<core-discovery.adoc#,Core Discovery>>.
 
 == Defining solr.xml
 
-You can find `solr.xml` in your `$SOLR_HOME` directory (usually `server/solr` or `/var/solr/data`) or optionally in ZooKeeper when using SolrCloud. The default `solr.xml` file looks like this:
+You can find `solr.xml` in your `$SOLR_HOME` directory (usually `server/solr` or `/var/solr/data`) or optionally in ZooKeeper when using SolrCloud.
+The default `solr.xml` file looks like this:
 
 [source,xml]
 ----
@@ -55,13 +57,15 @@ You can find `solr.xml` in your `$SOLR_HOME` directory (usually `server/solr` or
 </solr>
 ----
 
-As you can see, the discovery Solr configuration is "SolrCloud friendly". However, the presence of the `<solrcloud>` element does _not_ mean that the Solr instance is running in SolrCloud mode. Unless the `-DzkHost` or `-DzkRun` are specified at startup time, this section is ignored.
+As you can see, the discovery Solr configuration is "SolrCloud friendly".
+However, the presence of the `<solrcloud>` element does _not_ mean that the Solr instance is running in SolrCloud mode.
+Unless `-DzkHost` or `-DzkRun` is specified at startup time, this section is ignored.
 
 == Solr.xml Parameters
 
 === The <solr> Element
 
-There are no attributes that you can specify in the `<solr>` tag, which is the root element of `solr.xml`. The tables below list the child nodes of each XML element in `solr.xml`.
+There are no attributes that you can specify in the `<solr>` tag, which is the root element of `solr.xml`.
+The tables below list the child nodes of each XML element in `solr.xml`.
 
 `configSetService`::
 This attribute does not need to be set.
@@ -74,7 +78,8 @@ If this attribute isn't set, Solr uses the default `configSetService`, with zook
 `adminHandler`::
 This attribute does not need to be set.
 +
-If used, this attribute should be set to the FQN (Fully qualified name) of a class that inherits from CoreAdminHandler. For example, `<str name="adminHandler">com.myorg.MyAdminHandler</str>` would configure the custom admin handler (MyAdminHandler) to handle admin requests.
+If used, this attribute should be set to the FQN (Fully qualified name) of a class that inherits from CoreAdminHandler.
+For example, `<str name="adminHandler">com.myorg.MyAdminHandler</str>` would configure the custom admin handler (MyAdminHandler) to handle admin requests.
 +
 If this attribute isn't set, Solr uses the default admin handler, `org.apache.solr.handler.admin.CoreAdminHandler`.
 
@@ -99,33 +104,52 @@ The root of the core discovery tree, defaults to `$SOLR_HOME` (by default, `serv
 Currently non-operational.
 
 `sharedLib`::
-Specifies the path to a common library directory that will be shared across all cores. Any JAR files in this directory will be added to the search path for Solr plugins. If the specified path is not absolute, it will be relative to `$SOLR_HOME`. Custom handlers may be placed in this directory. Note that specifying `sharedLib` will not remove `$SOLR_HOME/lib` from Solr's class path.
+Specifies the path to a common library directory that will be shared across all cores.
+Any JAR files in this directory will be added to the search path for Solr plugins.
+If the specified path is not absolute, it will be relative to `$SOLR_HOME`.
+Custom handlers may be placed in this directory.
+Note that specifying `sharedLib` will not remove `$SOLR_HOME/lib` from Solr's class path.
 
 `allowPaths`::
-Solr will normally only access folders relative to `$SOLR_HOME`, `$SOLR_DATA_HOME` or `coreRootDir`. If you need to e.g., create a core outside of these paths, you can explicitly allow the path with `allowPaths`. It is a comma separated string of file system paths to allow. The special value of `*` will allow any path on the system.
+Solr will normally only access folders relative to `$SOLR_HOME`, `$SOLR_DATA_HOME` or `coreRootDir`.
+If you need to, for example, create a core outside of these paths, you can explicitly allow the path with `allowPaths`.
+It is a comma-separated string of file system paths to allow.
+The special value of `*` will allow any path on the system.
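++
+A sketch (hypothetical paths) of allowing two additional directories:
++
+[source,xml]
+----
+<str name="allowPaths">/srv/solr/extra-cores,/mnt/backups</str>
+----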
 
-[#_allow_urls]
+[#allow-urls]
 `allowUrls`::
-Comma-separated list of Solr hosts to allow. The http/https protocol may be omitted, and only the host and port are checked, i.e., `10.0.0.1:8983/solr,10.0.0.1:8984/solr`.
-When running Solr in non-cloud mode and if planning to do distributed search (using the "shards" parameter), the list of hosts needs to be allowed or Solr will forbid the request. In Solr cloud mode, this allow-list is added to the cluster's live nodes to determine which hosts are allowed. The allow-list can also be configured with the `solr.allowUrls` system property (see its use in `solr.in.sh`).
+Comma-separated list of Solr hosts to allow.
++
+The HTTP/HTTPS protocol may be omitted, and only the host and port are checked, i.e., `10.0.0.1:8983/solr,10.0.0.1:8984/solr`.
++
+When running Solr as a user-managed cluster and using the `shards` parameter, a list of hosts needs to be specifically configured as allowed or Solr will forbid the request.
++
+In SolrCloud mode, the allow-list is automatically configured to include all live nodes in the cluster.
++
+The allow-list can also be configured with the `solr.allowUrls` system property in `solr.in.sh` / `solr.in.cmd`.
 If you need to disable this feature for backwards compatibility, you can set the system property `solr.disable.allowUrls=true`.
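++
+A sketch (hypothetical hosts) of configuring the allow-list directly in `solr.xml`:
++
+[source,xml]
+----
+<str name="allowUrls">10.0.0.1:8983/solr,10.0.0.2:8983/solr</str>
+----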
 
 `shareSchema`::
-This attribute, when set to `true`, ensures that the multiple cores pointing to the same Schema resource file will be referring to the same IndexSchema Object. Sharing the IndexSchema Object makes loading the core faster. If you use this feature, make sure that no core-specific property is used in your Schema file.
+This attribute, when set to `true`, ensures that multiple cores pointing to the same schema resource file will refer to the same IndexSchema object.
+Sharing the IndexSchema Object makes loading the core faster.
+If you use this feature, make sure that no core-specific property is used in your Schema file.
 
 `transientCacheSize`::
 Defines how many cores with `transient=true` that can be loaded before swapping the least recently used core for a new core.
 
 `configSetBaseDir`::
-The directory under which configsets for Solr cores can be found. Defaults to `$SOLR_HOME/configsets`.
+The directory under which configsets for Solr cores can be found.
+Defaults to `$SOLR_HOME/configsets`.
 
 [[global-maxbooleanclauses]]
 `maxBooleanClauses`::
 Sets the maximum number of (nested) clauses allowed in any query.
 +
-This global limit provides a safety constraint on the total number of clauses allowed in any query against any collection -- regardless of whether those clauses were explicitly specified in a query string, or were the result of query expansion/re-writing from a more complex type of query based on the terms in the index.  This limit is enforced at multiple points in the Lucene code base, both to prevent primative query objects (mainly `BooleanQuery`) from being constructed with an excessi [...]
+This global limit provides a safety constraint on the total number of clauses allowed in any query against any collection -- regardless of whether those clauses were explicitly specified in a query string, or were the result of query expansion/re-writing from a more complex type of query based on the terms in the index.
+This limit is enforced at multiple points in Lucene, both to prevent primitive query objects (mainly `BooleanQuery`) from being constructed with an excessive number of clauses in a way that may exhaust the JVM heap, and also to ensure that no composite query (made up of multiple primitive queries) can be executed with an excessive _total_ number of nested clauses in a way that may cause a search thread to use excessive CPU.
 +
-In default configurations this property uses the value of the `solr.max.booleanClauses` system property if specified.  This is the same system property used in the `_default` configset for the <<query-settings-in-solrconfig#maxbooleanclauses,`<maxBooleanClauses>` setting of `solrconfig.xml`>> making it easy for Solr administrators to increase both values (in all collections) without needing to search through and update all of their configs.
+In default configurations this property uses the value of the `solr.max.booleanClauses` system property if specified.
+This is the same system property used in the `_default` configset for the <<caches-warming.adoc#maxbooleanclauses-element,`<maxBooleanClauses>` element of `solrconfig.xml`>> making it easy for Solr administrators to increase both values (in all collections) without needing to search through and update all of their configs.
 +
 [source,xml]
 ----
@@ -134,7 +158,8 @@ In default configurations this property uses the value of the `solr.max.booleanC
 
 === The <solrcloud> Element
 
-This element defines several parameters that relate so SolrCloud. This section is ignored unless theSolr instance is started with either `-DzkRun` or `-DzkHost`
+This element defines several parameters that relate to SolrCloud.
+This section is ignored unless the Solr instance is started with either `-DzkRun` or `-DzkHost`.
 
 `distribUpdateConnTimeout`::
 Used to set the underlying `connTimeout` for intra-cluster updates.
@@ -166,13 +191,15 @@ When trying to elect a leader for a shard, this property sets the maximum time a
 Typically, the default value of `180000` (ms) is sufficient for conflicts to be resolved; you may need to increase this value if you have hundreds or thousands of small collections in SolrCloud.
 
 `zkClientTimeout`::
-A timeout for connection to a ZooKeeper server. It is used with SolrCloud.
+A timeout for connection to a ZooKeeper server.
+It is used with SolrCloud.
 
 `zkHost`::
 In SolrCloud mode, the URL of the ZooKeeper host that Solr should use for cluster state information.
 
 `genericCoreNodeNames`::
-If `TRUE`, node names are not based on the address of the node, but on a generic name that identifies the core. When a different machine takes over serving that core things will be much easier to understand.
+If `TRUE`, node names are not based on the address of the node, but on a generic name that identifies the core.
+When a different machine takes over serving that core, things will be much easier to understand.
 
 `zkCredentialsProvider` & `zkACLProvider`::
 Optional parameters that can be specified if you are using <<zookeeper-access-control.adoc#,ZooKeeper Access Control>>.
@@ -184,7 +211,8 @@ If `TRUE`, the internal behavior of SolrCloud is changed to not use the Overseer
 === The <logging> Element
 
 `class`::
-The class to use for logging. The corresponding JAR file must be available to Solr, perhaps through a `<lib>` directive in `solrconfig.xml`.
+The class to use for logging.
+The corresponding JAR file must be available to Solr, perhaps through a `<lib>` directive in `solrconfig.xml`.
 
 `enabled`::
 true/false - whether to enable logging or not.
@@ -195,7 +223,8 @@ true/false - whether to enable logging or not.
 The number of log events that are buffered.
 
 `threshold`::
-The logging level above which your particular logging implementation will record. For example when using log4j one might specify DEBUG, WARN, INFO, etc.
+The logging level above which your particular logging implementation will record.
+For example when using log4j one might specify DEBUG, WARN, INFO, etc.
 
 === The <shardHandlerFactory> Element
 
@@ -206,37 +235,50 @@ Custom shard handlers can be defined in `solr.xml` if you wish to create a custo
 <shardHandlerFactory name="ShardHandlerFactory" class="qualified.class.name">
 ----
 
-Since this is a custom shard handler, sub-elements are specific to the implementation. The default and only shard handler provided by Solr is the `HttpShardHandlerFactory` in which case, the following sub-elements can be specified:
+Since this is a custom shard handler, sub-elements are specific to the implementation.
+The default and only shard handler provided by Solr is the `HttpShardHandlerFactory` in which case, the following sub-elements can be specified:
 
 `socketTimeout`::
-The read timeout for intra-cluster query and administrative requests. The default is the same as the `distribUpdateSoTimeout` specified in the `<solrcloud>` section.
+The read timeout for intra-cluster query and administrative requests.
+The default is the same as the `distribUpdateSoTimeout` specified in the `<solrcloud>` section.
 
 `connTimeout`::
-The connection timeout for intra-cluster query and administrative requests. Defaults to the `distribUpdateConnTimeout` specified in the `<solrcloud>` section.
+The connection timeout for intra-cluster query and administrative requests.
+Defaults to the `distribUpdateConnTimeout` specified in the `<solrcloud>` section.
 
 `urlScheme`::
 The URL scheme to be used in distributed search.
 
 `maxConnectionsPerHost`::
-Maximum connections allowed per host. Defaults to `100000`.
+Maximum connections allowed per host.
+Defaults to `100000`.
 
 `corePoolSize`::
-The initial core size of the threadpool servicing requests. Default is `0`.
+The initial core size of the threadpool servicing requests.
+Default is `0`.
 
 `maximumPoolSize`::
-The maximum size of the threadpool servicing requests. Default is unlimited.
+The maximum size of the threadpool servicing requests.
+Default is unlimited.
 
 `maxThreadIdleTime`::
-The amount of time in seconds that idle threads persist for in the queue, before being killed. Default is `5` seconds.
+The amount of time in seconds that idle threads persist for in the queue, before being killed.
+Default is `5` seconds.
 
 `sizeOfQueue`::
-If the threadpool uses a backing queue, what is its maximum size to use direct handoff. Default is to use a SynchronousQueue.
+If the threadpool uses a backing queue, this sets its maximum size.
+Default is to use a SynchronousQueue (direct handoff) rather than a backing queue.
 
 `fairnessPolicy`::
-A boolean to configure if the threadpool favors fairness over throughput. Default is false to favor throughput.
+A boolean to configure if the threadpool favors fairness over throughput.
+Default is false to favor throughput.
 
 `replicaRouting`::
-A NamedList specifying replica routing preference configuration. This may be used to select and configure replica routing preferences. `default=true` may be used to set the default base replica routing preference. Only positive default status assertions are respected; i.e., `default=false` has no effect. If no explicit default base replica routing preference is configured, the implicit default will be `random`.
+A NamedList specifying replica routing preference configuration.
+This may be used to select and configure replica routing preferences.
+`default=true` may be used to set the default base replica routing preference.
+Only positive default status assertions are respected; i.e., `default=false` has no effect.
+If no explicit default base replica routing preference is configured, the implicit default will be `random`.
 ----
 <shardHandlerFactory class="HttpShardHandlerFactory">
   <lst name="replicaRouting">
@@ -248,19 +290,26 @@ A NamedList specifying replica routing preference configuration. This may be use
   </lst>
 </shardHandlerFactory>
 ----
-Replica routing may also be specified (overriding defaults) per-request, via the `shards.preference` request parameter. If a request contains both `dividend` and `hash`, `dividend` takes priority for routing. For configuring `stable` routing, the `hash` parameter implicitly defaults to a hash of the String value of the main query parameter (i.e., `q`).
+Replica routing may also be specified (overriding defaults) per-request, via the `shards.preference` request parameter.
+If a request contains both `dividend` and `hash`, `dividend` takes priority for routing.
+For configuring `stable` routing, the `hash` parameter implicitly defaults to a hash of the String value of the main query parameter (i.e., `q`).
 +
-The `dividend` parameter must be configured explicitly; there is no implicit default. If only `dividend` routing is desired, `hash` may be explicitly set to the empty string, entirely disabling implicit hash-based routing.
+The `dividend` parameter must be configured explicitly; there is no implicit default.
+If only `dividend` routing is desired, `hash` may be explicitly set to the empty string, entirely disabling implicit hash-based routing.
 
 === The <metrics> Element
 
-The `<metrics>` element in `solr.xml` allows you to customize the metrics reported by Solr. You can define system properties that should not be returned, or define custom suppliers and reporters.
+The `<metrics>` element in `solr.xml` allows you to customize the metrics reported by Solr.
+You can define system properties that should not be returned, or define custom suppliers and reporters.
 
-In a default `solr.xml` you will not see any `<metrics>` configuration. If you would like to customize the metrics for your installation, see the section <<metrics-reporting.adoc#metrics-configuration,Metrics Configuration>>.
+In a default `solr.xml` you will not see any `<metrics>` configuration.
+If you would like to customize the metrics for your installation, see the section <<metrics-reporting.adoc#metrics-configuration,Metrics Configuration>>.
 
 == Substituting JVM System Properties in solr.xml
 
-Solr supports variable substitution of JVM system property values in `solr.xml`, which allows runtime specification of various configuration options. The syntax is `${propertyname[:option default value]}`. This allows defining a default that can be overridden when Solr is launched. If a default value is not specified, then the property must be specified at runtime or the `solr.xml` file will generate an error when parsed.
+Solr supports variable substitution of JVM system property values in `solr.xml`, which allows runtime specification of various configuration options.
+The syntax is `${propertyname[:option default value]}`.
+This allows defining a default that can be overridden when Solr is launched.
+If a default value is not specified, then the property must be specified at runtime or the `solr.xml` file will generate an error when parsed.
 
 Any JVM system properties usually specified using the `-D` flag when starting the JVM, can be used as variables in the `solr.xml` file.
 
diff --git a/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc b/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc
index 814d351..c91f031 100644
--- a/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc
+++ b/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc
@@ -1,16 +1,17 @@
 = Configuring solrconfig.xml
-:page-children: datadir-and-directoryfactory-in-solrconfig, \
-    schema-factory-definition-in-solrconfig, \
-    indexconfig-in-solrconfig, \
-    requesthandlers-and-searchcomponents-in-solrconfig, \
-    initparams-in-solrconfig, \
-    updatehandlers-in-solrconfig, \
-    query-settings-in-solrconfig, \
-    requestdispatcher-in-solrconfig, \
+:page-children: index-location-format, \
+    index-segments-merging, \
+    schema-factory, \
+    commits-transaction-logs, \
+    caches-warming, \
+    requesthandlers-searchcomponents, \
+    implicit-requesthandlers, \
+    realtime-get, \
+    initparams, \
+    requestdispatcher, \
     update-request-processors, \
-    script-update-processor, \    
+    script-update-processor, \
     codec-factory
-
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -42,165 +43,32 @@ In `solrconfig.xml`, you configure important features such as:
 
 * the Admin Web interface
 
-* parameters related to replication and duplication (these parameters are covered in detail in <<legacy-scaling-and-distribution.adoc#,Legacy Scaling and Distribution>>)
-
-The `solrconfig.xml` file is located in the `conf/` directory for each collection. Several well-commented example files can be found in the `server/solr/configsets/` directories demonstrating best practices for many different types of installations.
-
-We've covered the options in the following sections:
+* parameters related to replication and duplication when not running in SolrCloud mode
 
-* <<datadir-and-directoryfactory-in-solrconfig.adoc#,DataDir and DirectoryFactory in SolrConfig>>
-* <<schema-factory-definition-in-solrconfig.adoc#,Schema Factory Definition in SolrConfig>>
-* <<indexconfig-in-solrconfig.adoc#,IndexConfig in SolrConfig>>
-* <<requesthandlers-and-searchcomponents-in-solrconfig.adoc#,RequestHandlers and SearchComponents in SolrConfig>>
-* <<initparams-in-solrconfig.adoc#,InitParams in SolrConfig>>
-* <<updatehandlers-in-solrconfig.adoc#,UpdateHandlers in SolrConfig>>
-* <<query-settings-in-solrconfig.adoc#,Query Settings in SolrConfig>>
-* <<requestdispatcher-in-solrconfig.adoc#,RequestDispatcher in SolrConfig>>
-* <<update-request-processors.adoc#,Update Request Processors>>
-* <<codec-factory.adoc#,Codec Factory>>
+The `solrconfig.xml` file is located in the `conf/` directory for each collection.
+Several well-commented example files can be found in the `server/solr/configsets/` directories demonstrating best practices for many different types of installations.
 
-Some SolrConfig aspects are covered in other sections.
+Some `solrconfig.xml` aspects are documented in other sections.
 See <<libs.adoc#lib-directives-in-solrconfig,lib directives in SolrConfig>>, which can be used for both Plugins and Resources.
 
-== Substituting Properties in Solr Config Files
-
-Solr supports variable substitution of property values in configuration files, which allows runtime specification of various configuration options in `solrconfig.xml`. The syntax is `${propertyname[:option default value]`}. This allows defining a default that can be overridden when Solr is launched. If a default value is not specified, then the property _must_ be specified at runtime or the configuration file will generate an error when parsed.
-
-There are multiple methods for specifying properties that can be used in configuration files. Of those below, strongly consider "config overlay" as the preferred approach, as it stays local to the configset and is easy to modify.
-
-=== JVM System Properties
-
-Any JVM System properties, usually specified using the `-D` flag when starting the JVM, can be used as variables in any XML configuration file in Solr.
-
-For example, in the sample `solrconfig.xml` files, you will see this value which defines the locking type to use:
-
-[source,xml]
-----
-<lockType>${solr.lock.type:native}</lockType>
-----
-
-Which means the lock type defaults to "native" but when starting Solr, you could override this using a JVM system property by launching the Solr it with:
-
-[source,bash]
-----
-bin/solr start -Dsolr.lock.type=none
-----
-
-In general, any Java system property that you want to set can be passed through the `bin/solr` script using the standard `-Dproperty=value` syntax. Alternatively, you can add common system properties to the `SOLR_OPTS` environment variable defined in the Solr include file (`bin/solr.in.sh` or `bin/solr.in.cmd`). For more information about how the Solr include file works, refer to: <<taking-solr-to-production.adoc#,Taking Solr to Production>>.
-
-=== Config API to Override solrconfig.xml
-
-The <<config-api.adoc#,Config API>> allows you to use an API to modify Solr's configuration, specifically user defined properties. Changes made with this API are stored in a file named `configoverlay.json`. This file should only be edited with the API, but will look like this example:
-
-[source,json]
-----
-{
-  "userProps":{"update.autoCreateFields":"false"},
-  "requestHandler":{"/myterms":{
-      "name":"/myterms",
-      "class":"solr.SearchHandler",
-      "defaults":{
-        "terms":true,
-        "distrib":false},
-      "components":["terms"]}}}
-----
-
-For more details, see the section <<config-api.adoc#,Config API>>.
-
-=== solrcore.properties
-
-If the configuration directory for a Solr core contains a file named `solrcore.properties` that file can contain any arbitrary user-defined property names and values using the Java https://en.wikipedia.org/wiki/.properties[properties file format]. Those properties can then be used as variables in other configuration files for that Solr core.
-
-For example, the following `solrcore.properties` file could be created in the `conf/` directory of a collection using one of the example configurations, to override the lockType used.
-
-[source,properties]
-----
-#conf/solrcore.properties
-solr.lock.type=none
-----
-
-.Deprecation
-[WARNING]
-====
-`solrcore.properties` won't work in SolrCloud mode (it is not read from ZooKeeper). This feature is likely to be removed in the future. Instead, use another mechanism like a config overlay.
-====
-
-[IMPORTANT]
-====
-
-The path and name of the `solrcore.properties` file can be overridden using the `properties` property in <<defining-core-properties.adoc#,`core.properties`>>.
-
-====
-
-=== User-Defined Properties in core.properties
-
-Every Solr core has a `core.properties` file, automatically created when using the APIs. When you create a SolrCloud collection, you can pass through custom parameters by prefixing the parameter name with `_property.name_` as a parameter.
-
-For example, to add a property named "my.custom.prop":
-
-[.dynamic-tabs]
---
-[example.tab-pane#v1customprop]
-====
-[.tab-label]*V1 API*
-
-[source,bash]
-----
-http://localhost:8983/solr/admin/collections?action=CREATE&name=gettingstarted&numShards=1&property.my.custom.prop=edismax
-----
-====
-
-[example.tab-pane#v2]
-====
-[.tab-label]*V2 API*
-
-[source,bash]
-----
-curl -X POST -H 'Content-type: application/json' -d '{"create": {"name": "gettingstarted", "numShards": "1", "property.my.custom.prop": "edismax"}}' http://localhost:8983/api/collections
-----
-====
---
-
-This will create a `core.properties` file that has at least the following properties (others omitted for brevity):
-
-[source,properties]
-----
-#core.properties
-name=gettingstarted
-my.custom.prop=edismax
-----
-
-The `my.custom.prop` property can then be used as a variable, such as in `solrconfig.xml`:
-
-[source,xml]
-----
-<requestHandler name="/select">
-  <lst name="defaults">
-    <str name="defType">${my.custom.prop}</str>
-  </lst>
-</requestHandler>
-----
-
-=== Implicit Core Properties
-
-Several attributes of a Solr core are available as "implicit" properties that can be used in variable substitution, independent of where or how the underlying value is initialized.
-
-For example, regardless of whether the name for a particular Solr core is explicitly configured in `core.properties` or inferred from the name of the instance directory, the implicit property `solr.core.name` is available for use as a variable in that core's configuration file:
-
-[source,xml]
-----
-<requestHandler name="/select">
-  <lst name="defaults">
-    <str name="collection_name">${solr.core.name}</str>
-  </lst>
-</requestHandler>
-----
-
-All implicit properties use the `solr.core.` name prefix, and reflect the runtime value of the equivalent <<defining-core-properties.adoc#,`core.properties` property>>:
-
-* `solr.core.name`
-* `solr.core.config`
-* `solr.core.schema`
-* `solr.core.dataDir`
-* `solr.core.transient`
-* `solr.core.loadOnStartup`
+****
+// This tags the below list so it can be used in the parent page section list
+// tag::solrconfig-sections[]
+[cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<index-location-format.adoc#,Index Location and Format>>: Where and how Solr's indexes are stored.
+| <<index-segments-merging.adoc#,Index Segments and Merging>>: Lucene index writers, including segment management, merges, and locks.
+| <<schema-factory.adoc#,Schema Factory>>: Schema file formats.
+| <<commits-transaction-logs.adoc#,Commits and Transaction Logs>>: Update requests and commit settings.
+| <<caches-warming.adoc#,Caches and Query Warming>>: Caches, query warming, and query listeners.
+| <<requesthandlers-searchcomponents.adoc#,Request Handlers and Search Components>>: Request processors and handlers for search features.
+| <<implicit-requesthandlers.adoc#,Implicit Request Handlers>>: Request end-points automatically provided by Solr.
+| <<realtime-get.adoc#,RealTime Get>>: Get the latest version of a document without opening a searcher.
+| <<initparams.adoc#,InitParams>>: Default parameters for request handlers.
+| <<requestdispatcher.adoc#,RequestDispatcher>>: Advanced request parsing and HTTP cache headers.
+| <<update-request-processors.adoc#,Update Request Processors>>: Plugins for update requests.
+| <<script-update-processor.adoc#,Script Update Processor>>: Java scripting engines during document updates.
+| <<codec-factory.adoc#,Codec Factory>>: Lucene codecs when writing data to disk.
+|===
+//end::solrconfig-sections[]
+****
diff --git a/solr/solr-ref-guide/src/content-streams.adoc b/solr/solr-ref-guide/src/content-streams.adoc
index f307787..9e87596 100644
--- a/solr/solr-ref-guide/src/content-streams.adoc
+++ b/solr/solr-ref-guide/src/content-streams.adoc
@@ -18,24 +18,29 @@
 
 Content streams are bulk data passed with a request to Solr.
 
-When Solr RequestHandlers are accessed using path based URLs, the `SolrQueryRequest` object containing the parameters of the request may also contain a list of ContentStreams containing bulk data for the request. (The name SolrQueryRequest is a bit misleading: it is involved in all requests, regardless of whether it is a query request or an update request.)
+When Solr RequestHandlers are accessed using path based URLs, the `SolrQueryRequest` object containing the parameters of the request may also contain a list of ContentStreams containing bulk data for the request.
+(The name SolrQueryRequest is a bit misleading: it is involved in all requests, regardless of whether it is a query request or an update request.)
 
 == Content Stream Sources
 
 Currently request handlers can get content streams in a variety of ways:
 
 * For multipart file uploads, each file is passed as a stream.
-* For POST requests where the content-type is not `application/x-www-form-urlencoded`, the raw POST body is passed as a stream. The full POST body is parsed as parameters and included in the Solr parameters.
+* For POST requests where the content-type is not `application/x-www-form-urlencoded`, the raw POST body is passed as a stream.
+The full POST body is parsed as parameters and included in the Solr parameters.
 * The contents of parameter `stream.body` is passed as a stream.
 * If remote streaming is enabled and URL content is called for during request handling, the contents of each `stream.url` and `stream.file` parameters are fetched and passed as a stream.
 
-By default, curl sends a `contentType="application/x-www-form-urlencoded"` header. If you need to test a SolrContentHeader content stream, you will need to set the content type with curl's `-H` flag.
+By default, curl sends a `Content-Type: application/x-www-form-urlencoded` header.
+If you need to send a content stream with a different content type, you will need to set the content type with curl's `-H` flag.
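+
+For example, a sketch (assuming the `techproducts` example collection) of sending a JSON body as a content stream with an explicit content type:
+
+[source,bash]
+----
+curl -X POST -H 'Content-type: application/json' \
+  'http://localhost:8983/solr/techproducts/update?commit=true' \
+  --data-binary '[{"id":"doc1","cat":"example"}]'
+----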
 
 == Remote Streaming
 
-Remote streaming lets you send the contents of a URL as a stream to a given Solr RequestHandler. You could use remote streaming to send a remote or local file to an update plugin.
+Remote streaming lets you send the contents of a URL as a stream to a given Solr RequestHandler.
+You could use remote streaming to send a remote or local file to an update plugin.
 
-Remote streaming is disabled by default. Enabling it is not recommended in a production situation without additional security between you and untrusted remote clients.
+Remote streaming is disabled by default.
+Enabling it is not recommended in a production situation without additional security between you and untrusted remote clients.
 
 In `solrconfig.xml`, you can enable it by changing the following `enableRemoteStreaming` parameter to `true`:
 
@@ -75,7 +80,8 @@ curl -X POST -H 'Content-type: application/json' -d '{"set-property": {"requestD
 
 [IMPORTANT]
 ====
-If `enableRemoteStreaming="true"` is used, be aware that this allows _anyone_ to send a request to any URL or local file. If the <<Debugging Requests,DumpRequestHandler>> is enabled, it will allow anyone to view any file on your system.
+If `enableRemoteStreaming="true"` is used, be aware that this allows _anyone_ to send a request to any URL or local file.
+If the <<Debugging Requests,DumpRequestHandler>> is enabled, it will allow anyone to view any file on your system.
 ====
 
 The source of the data can be compressed using gzip, and Solr will generally detect this.
@@ -84,4 +90,5 @@ Gzip doesn't apply to `stream.body`.
 
 == Debugging Requests
 
-The implicit "dump" RequestHandler (see <<implicit-requesthandlers.adoc#,Implicit RequestHandlers>>) simply outputs the contents of the Solr QueryRequest using the specified writer type `wt`. This is a useful tool to help understand what streams are available to the RequestHandlers.
+The implicit "dump" RequestHandler (see <<implicit-requesthandlers.adoc#,Implicit Request Handlers>>) simply outputs the contents of the Solr QueryRequest using the specified writer type `wt`.
+This is a useful tool to help understand what streams are available to the RequestHandlers.
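+
+For example, a request to the implicit dump handler (assuming the `techproducts` example collection) echoes the request parameters back to the client:
+
+[source,bash]
+----
+curl 'http://localhost:8983/solr/techproducts/debug/dump?q=hello'
+----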
diff --git a/solr/solr-ref-guide/src/controlling-results.adoc b/solr/solr-ref-guide/src/controlling-results.adoc
new file mode 100644
index 0000000..9e62326
--- /dev/null
+++ b/solr/solr-ref-guide/src/controlling-results.adoc
@@ -0,0 +1,58 @@
+= Controlling Results
+:page-children: faceting, \
+    json-facet-api, \
+    collapse-and-expand-results, \
+    result-grouping, \
+    result-clustering, \
+    highlighting, \
+    query-elevation-component, \
+    document-transformers, \
+    response-writers, \
+    exporting-result-sets, \
+    pagination-of-results
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+[.lead]
+Once users submit a query, Solr provides a number of options for how to present the results.
+
+Features like facets, grouping, collapsing, and clustering provide ways to group similar results together.
+
+Highlighting shows users their query terms in context with surrounding text, helping them decide if a document "matches" their query.
+
+Solr offers several ways to get results, or control how "pages" of results are returned to your client.
+
+****
+// This tags the below list so it can be used in the parent page section list
+// tag::results-sections[]
+[cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<faceting.adoc#,Faceting>>: Categorize search results based on indexed terms.
+| <<json-facet-api.adoc#facet-analytics-module,JSON Facet API>>: Faceting and analytics using JSON request syntax.
+| <<collapse-and-expand-results.adoc#,Collapse and Expand Results>>: Collapse documents into groups and expand the results.
+| <<result-grouping.adoc#,Result Grouping>>: Group results based on common field values.
+| <<result-clustering.adoc#,Result Clustering>>: Group search results based on cluster analysis applied to text fields.
+| <<highlighting.adoc#,Highlighting>>: Highlighting search terms in document snippets.
+| <<query-elevation-component.adoc#,Query Elevation Component>>: Force documents to the top of the results for certain queries.
+| <<document-transformers.adoc#,Document Transformers>>: Compute information and add to individual documents.
+| <<response-writers.adoc#,Response Writers>>: Format options for search results.
+| <<exporting-result-sets.adoc#,Exporting Result Sets>>: Export large result sets out of Solr.
+| <<pagination-of-results.adoc#,Pagination of Results>>: Offering paginated results.
+|
+|===
+// end::results-sections[]
+****
diff --git a/solr/solr-ref-guide/src/copying-fields.adoc b/solr/solr-ref-guide/src/copy-fields.adoc
similarity index 55%
rename from solr/solr-ref-guide/src/copying-fields.adoc
rename to solr/solr-ref-guide/src/copy-fields.adoc
index a1e388f..2404b70 100644
--- a/solr/solr-ref-guide/src/copying-fields.adoc
+++ b/solr/solr-ref-guide/src/copy-fields.adoc
@@ -1,4 +1,4 @@
-= Copying Fields
+= Copy Fields
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -16,7 +16,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-You might want to interpret some document fields in more than one way. Solr has a mechanism for making copies of fields so that you can apply several distinct field types to a single piece of incoming information.
+You might want to interpret some document fields in more than one way.
+Solr has a mechanism for making copies of fields so that you can apply several distinct field types to a single piece of incoming information.
 
 The name of the field you want to copy is the _source_, and the name of the copy is the _destination_. In `schema.xml`, it's very simple to make copies of fields:
 
@@ -25,15 +26,22 @@ The name of the field you want to copy is the _source_, and the name of the copy
 <copyField source="cat" dest="text" maxChars="30000" />
 ----
 
-In this example, we want Solr to copy the `cat` field to a field named `text`. Fields are copied before <<understanding-analyzers-tokenizers-and-filters.adoc#,analysis>> is done, meaning you can have two fields with identical original content, but which use different analysis chains and are stored in the index differently.
+In this example, we want Solr to copy the `cat` field to a field named `text`.
+Fields are copied before <<document-analysis.adoc#,analysis>> is done, meaning you can have two fields with identical original content, but which use different analysis chains and are stored in the index differently.
 
-In the example above, if the `text` destination field has data of its own in the input documents, the contents of the `cat` field will be added as additional values – just as if all of the values had originally been specified by the client. Remember to configure your fields as `multivalued="true"` if they will ultimately get multiple values (either from a multivalued source or from multiple `copyField` directives).
+In the example above, if the `text` destination field has data of its own in the input documents, the contents of the `cat` field will be added as additional values – just as if all of the values had originally been specified by the client.
+Remember to configure your fields as `multivalued="true"` if they will ultimately get multiple values (either from a multivalued source or from multiple `copyField` directives).
 
-A common usage for this functionality is to create a single "search" field that will serve as the default query field when users or clients do not specify a field to query. For example, `title`, `author`, `keywords`, and `body` may all be fields that should be searched by default, with copy field rules for each field to copy to a `catchall` field (for example, it could be named anything). Later you can set a rule in `solrconfig.xml` to search the `catchall` field by default. One caveat t [...]
+A common usage for this functionality is to create a single "search" field that will serve as the default query field when users or clients do not specify a field to query.
+For example, `title`, `author`, `keywords`, and `body` may all be fields that should be searched by default, with copy field rules for each field to copy to a `catchall` field (which could be named anything).
+Later you can set a rule in `solrconfig.xml` to search the `catchall` field by default.
+One caveat to this is your index will grow when using copy fields.
+However, whether this becomes problematic for you and the final size will depend on the number of fields being copied, the number of destination fields being copied to, the analysis in use, and the available disk space.
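+
+To illustrate the catchall pattern described above, here is a minimal, hedged sketch of adding one such rule at runtime with the Schema API (the core name `my_core` and the multivalued `catchall` field are assumptions made for the example):
+
+[source,bash]
+----
+# assumes a core named my_core with a multivalued catchall field already in its schema
+curl -X POST -H 'Content-type:application/json' --data-binary '{
+  "add-copy-field": {"source": "title", "dest": "catchall"}
+}' http://localhost:8983/solr/my_core/schema
+----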
 
-The `maxChars` parameter, an `int` parameter, establishes an upper limit for the number of characters to be copied from the source value when constructing the value added to the destination field. This limit is useful for situations in which you want to copy some data from the source field, but also control the size of index files.
+The `maxChars` parameter, an `int` parameter, establishes an upper limit for the number of characters to be copied from the source value when constructing the value added to the destination field.
+This limit is useful for situations in which you want to copy some data from the source field, but also control the size of index files.
 
-Both the source and the destination of `copyField` can contain either leading or trailing asterisks, which will match anything. For example, the following line will copy the contents of all incoming fields that match the wildcard pattern `*_t` to the text field.:
+Both the source and the destination of `copyField` can contain either leading or trailing asterisks, which will match anything.
+For example, the following line will copy the contents of all incoming fields that match the wildcard pattern `*_t` to the `text` field:
 
 [source,xml]
 ----
@@ -42,10 +50,13 @@ Both the source and the destination of `copyField` can contain either leading or
 
 [IMPORTANT]
 ====
-The `copyField` command can use a wildcard (*) character in the `dest` parameter only if the `source` parameter contains one as well. `copyField` uses the matching glob from the source field for the `dest` field name into which the source content is copied.
+The `copyField` command can use a wildcard (*) character in the `dest` parameter only if the `source` parameter contains one as well.
+`copyField` uses the matching glob from the source field for the `dest` field name into which the source content is copied.
 ====
 
-Copying is done at the stream source level and no copy feeds into another copy. This means that copy fields cannot be chained i.e., _you cannot_ copy from `here` to `there` and then from `there` to `elsewhere`. However, the same source field can be copied to multiple destination fields:
+Copying is done at the stream source level and no copy feeds into another copy.
+This means that copy fields cannot be chained, i.e., _you cannot_ copy from `here` to `there` and then from `there` to `elsewhere`.
+However, the same source field can be copied to multiple destination fields:
 
 [source,xml]
 ----
diff --git a/solr/solr-ref-guide/src/core-discovery.adoc b/solr/solr-ref-guide/src/core-discovery.adoc
new file mode 100644
index 0000000..ed2107f
--- /dev/null
+++ b/solr/solr-ref-guide/src/core-discovery.adoc
@@ -0,0 +1,131 @@
+= Core Discovery
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+Core discovery means that creating a core is as simple as a `core.properties` file located on disk.
+
+== The core.properties File
+
+In Solr, the term _core_ is used to refer to a single index and associated transaction log and configuration files (including the `solrconfig.xml` and schema files, among others).
+Your Solr installation can have multiple cores if needed, which allows you to index data with different structures in the same server, and maintain control over how your data is presented to different audiences.
+In SolrCloud mode you will be more familiar with the term _collection_.
+Behind the scenes a collection consists of one or more cores.
+
+Cores can be created using the `bin/solr` script or as part of SolrCloud collection creation using the APIs.
+Core-specific properties (such as the directories to use for the indexes or configuration files, the core name, and other options) are defined in a `core.properties` file.
+Any `core.properties` file in any directory of your Solr installation (or in a directory under where `solr_home` is defined) will be found by Solr and the defined properties will be used for the core named in the file.
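+
+For example, a minimal sketch of creating a core from the command line (assuming a running single-node Solr and the hypothetical core name `my_core`):
+
+[source,bash]
+----
+# creates the instance directory and a core.properties file for my_core
+bin/solr create -c my_core
+----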
+
+The `core.properties` file is a simple Java Properties file where each line is just a key=value pair, e.g., `name=core1`.
+Notice that no quotes are required.
+
+A minimal `core.properties` file looks like the example below.
+However, it can also be empty; see the information on placement of `core.properties` below.
+
+[source,bash]
+----
+name=my_core_name
+----
+
+== Placement of core.properties
+
+Solr cores are configured by placing a file named `core.properties` in a sub-directory under `solr.home`.
+There are no a priori limits to the depth of the tree, nor are there limits to the number of cores that can be defined.
+Cores may be anywhere in the tree with the exception that cores may _not_ be defined under an existing core.
+That is, the following is not allowed:
+
+[source,text]
+----
+./cores/core1/core.properties
+./cores/core1/coremore/core5/core.properties
+----
+
+In this example, the enumeration will stop at "core1".
+
+The following is legal:
+
+[source,text]
+----
+./cores/somecores/core1/core.properties
+./cores/somecores/core2/core.properties
+./cores/othercores/core3/core.properties
+./cores/extracores/deepertree/core4/core.properties
+----
+
+It is possible to segment Solr into multiple cores, each with its own configuration and indices.
+Cores may be dedicated to a single application or to very different ones, but all are administered through a common administration interface.
+You can create new Solr cores on the fly, shut down cores, and even replace one running core with another, all without ever stopping or restarting Solr.
+
+Your `core.properties` file can be empty if necessary.
+Suppose `core.properties` is located in `./cores/core1` (relative to `solr_home`) but is empty.
+In this case, the core name is assumed to be "core1".
+The instanceDir will be the folder containing `core.properties` (i.e., `./cores/core1`).
+The dataDir will be `./cores/core1/data`, etc.
+
+[NOTE]
+====
+You can run Solr without configuring any cores.
+====
+
+== Defining core.properties Files
+
+The minimal `core.properties` file is an empty file, in which case all of the properties are defaulted appropriately.
+
+Java properties files allow the hash (`#`) or bang (`!`) characters to specify comment-to-end-of-line.
+
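+A short sketch of a `core.properties` file using both comment styles (the values are illustrative; the property names are described below):
+
+[source,bash]
+----
+# this core holds the product index
+! loadOnStartup and transient keep their defaults
+name=my_core_name
+dataDir=data
+----
+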
+The following properties are available:
+
+`name`:: The name of the SolrCore.
+You'll use this name to reference the SolrCore when running commands with the `CoreAdminHandler`.
+
+`config`:: The configuration file name for a given core.
+The default is `solrconfig.xml`.
+
+`schema`:: The schema file name for a given core.
+The default is `schema.xml` but please note that if you are using a "managed schema" (the default behavior) then any value for this property which does not match the effective `managedSchemaResourceName` will be read once, backed up, and converted for managed schema use.
+See <<schema-factory.adoc#,Schema Factory Definition in SolrConfig>> for more details.
+
+`dataDir`:: The core's data directory (where indexes are stored) as either an absolute pathname, or a path relative to the value of `instanceDir`.
+This is `data` by default.
+
+`configSet`:: The name of a defined configset, if desired, to use to configure the core (see the section <<config-sets.adoc#,Configsets>> for more details).
+
+`properties`:: The name of the properties file for this core.
+The value can be an absolute pathname or a path relative to the value of `instanceDir`.
+
+`transient`:: If `true`, the core can be unloaded if Solr reaches the `transientCacheSize`.
+The default is `false`.
+Cores are unloaded in order of least recently used first.
+_Setting this to `true` is not recommended in SolrCloud mode._
+
+`loadOnStartup`:: If `true`, the default, the core will be loaded when Solr starts.
+_Setting this to `false` is not recommended in SolrCloud mode._
+
+`coreNodeName`:: Used only in SolrCloud, this is a unique identifier for the node hosting this replica.
+By default a `coreNodeName` is generated automatically, but setting this attribute explicitly allows you to manually assign a new core to replace an existing replica.
+For example, this can be useful when replacing a machine that has had a hardware failure by restoring from backups on a new machine with a new hostname or port.
+
+`ulogDir`:: The absolute or relative directory for the update log for this core (SolrCloud only).
+
+`shard`:: The shard to assign this core to (SolrCloud only).
+
+`collection`:: The name of the collection this core is part of (SolrCloud only).
+
+`roles`:: Future parameter for SolrCloud or a way for users to mark nodes for their own use.
+
+Additional user-defined properties may be specified for use as variables.
+For more information on how to define local properties, see the section <<property-substitution.adoc#,Property Substitution in `solrconfig.xml`>>.
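+
+As a hedged sketch, a user-defined property (the name `my.custom.prop` is hypothetical) sits alongside the standard properties and can then be referenced as `${my.custom.prop}` in the core's configuration files:
+
+[source,bash]
+----
+name=my_core_name
+# my.custom.prop is a hypothetical user-defined property
+my.custom.prop=some_value
+----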
diff --git a/solr/solr-ref-guide/src/core-specific-tools.adoc b/solr/solr-ref-guide/src/core-specific-tools.adoc
deleted file mode 100644
index bd23a11..0000000
--- a/solr/solr-ref-guide/src/core-specific-tools.adoc
+++ /dev/null
@@ -1,47 +0,0 @@
-= Core-Specific Tools
-:page-children: ping, plugins-stats-screen, replication-screen, segments-info
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-The Core-Specific tools are a group of UI screens that allow you to see core-level information.
-
-In the left-hand navigation bar, you will see a pull-down menu titled "Core Selector". Clicking on the menu will show a list of Solr cores hosted on this Solr node, with a search box that can be used to find a specific core by name.
-
-When you select a core from the pull-down, the main display of the page will show some basic metadata about the core, and a secondary menu will appear in the left nav with links to additional core specific administration screens.
-
-.Core overview screen
-image::images/core-specific-tools/core_dashboard.png[image,width=515,height=250]
-
-The core-specific UI screens are listed below, with a link to the section of this guide to find out more:
-
-// TODO: SOLR-10655 BEGIN: refactor this into a 'core-screens-list.include.adoc' file for reuse
-* <<ping.adoc#,Ping>> - lets you ping a named core and determine whether the core is active.
-* <<plugins-stats-screen.adoc#,Plugins/Stats>> - shows statistics for plugins and other installed components.
-* <<replication-screen.adoc#,Replication>> - shows you the current replication status for the core, and lets you enable/disable replication.
-* <<segments-info.adoc#,Segments Info>> - Provides a visualization of the underlying Lucene index segments.
-// TODO: SOLR-10655 END
-
-If you are running a single node instance of Solr, additional UI screens normally displayed on a per-collection bases will also be listed:
-
-// TODO: SOLR-10655 BEGIN: refactor this into a 'collection-screens-list.include.adoc' file for reuse
-* <<analysis-screen.adoc#,Analysis>> - lets you analyze the data found in specific fields.
-* <<documents-screen.adoc#,Documents>> - provides a simple form allowing you to execute various Solr indexing commands directly from the browser.
-* <<files-screen.adoc#,Files>> - shows the current core configuration files such as `solrconfig.xml`.
-* <<query-screen.adoc#,Query>> - lets you submit a structured query about various elements of a core.
-* <<stream-screen.adoc#,Stream>> - allows you to submit streaming expressions and see results and parsing explanations.
-* <<schema-browser-screen.adoc#,Schema Browser>> - displays schema data in a browser window.
-// TODO: SOLR-10655 END
diff --git a/solr/solr-ref-guide/src/coreadmin-api.adoc b/solr/solr-ref-guide/src/coreadmin-api.adoc
index fe3ecef..5900191 100644
--- a/solr/solr-ref-guide/src/coreadmin-api.adoc
+++ b/solr/solr-ref-guide/src/coreadmin-api.adoc
@@ -17,11 +17,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The Core Admin API is primarily used under the covers by the <<collections-api.adoc#,Collections API>> when running a <<solrcloud.adoc#,SolrCloud>> cluster.
+The Core Admin API is primarily used under the covers by the <<collections-api.adoc#,Collections API>> when running a <<cluster-types.adoc#solrcloud-mode,SolrCloud>> cluster.
 
-SolrCloud users should not typically use the CoreAdmin API directly, but the API may be useful for users of single-node or leader/follower Solr installations for core maintenance operations.
+SolrCloud users should not typically use the CoreAdmin API directly, but the API may be useful for users of user-managed clusters or single-node installations for core maintenance operations.
 
-The CoreAdmin API is implemented by the CoreAdminHandler, which is a special purpose <<requesthandlers-and-searchcomponents-in-solrconfig.adoc#,request handler>> that is used to manage Solr cores. Unlike other request handlers, the CoreAdminHandler is not attached to a single core. Instead, there is a single instance of the CoreAdminHandler in each Solr node that manages all the cores running in that node and is accessible at the `/solr/admin/cores` path.
+The CoreAdmin API is implemented by the CoreAdminHandler, which is a special purpose <<requesthandlers-searchcomponents.adoc#,request handler>> that is used to manage Solr cores.
+Unlike other request handlers, the CoreAdminHandler is not attached to a single core.
+Instead, there is a single instance of the CoreAdminHandler in each Solr node that manages all the cores running in that node and is accessible at the `/solr/admin/cores` path.
 
 CoreAdmin actions can be executed via HTTP requests that specify an `action` request parameter, with additional action-specific arguments provided as additional parameters.
 
@@ -86,14 +88,17 @@ curl -X GET http://localhost:8983/api/cores?indexInfo=false
 The name of a core, as listed in the "name" attribute of a `<core>` element in `solr.xml`. This parameter is required in v1, and part of the url in the v2 API.
 
 `indexInfo`::
-If `false`, information about the index will not be returned with a core STATUS request. In Solr implementations with a large number of cores (i.e., more than hundreds), retrieving the index information for each core can take a lot of time and isn't always required. The default is `true`.
+If `false`, information about the index will not be returned with a core STATUS request.
+In Solr implementations with a large number of cores (i.e., more than hundreds), retrieving the index information for each core can take a lot of time and isn't always required.
+The default is `true`.
 
 [[coreadmin-create]]
 == CREATE
 
 The `CREATE` action creates a new core and registers it.
 
-If a Solr core with the given name already exists, it will continue to handle requests while the new core is initializing. When the new core is ready, it will take new requests and the old core will be unloaded.
+If a Solr core with the given name already exists, it will continue to handle requests while the new core is initializing.
+When the new core is ready, it will take new requests and the old core will be unloaded.
 
 `admin/cores?action=CREATE&name=_core-name_&instanceDir=_path/to/dir_&config=solrconfig.xml&dataDir=data`
 [.dynamic-tabs]
@@ -134,56 +139,76 @@ curl -X POST http://localhost:8983/api/cores -H 'Content-Type: application/json'
 ====
 --
 
-Note that this command is the only one of the Core Admin API commands that *does not* support the `core` parameter. Instead, the `name` parameter is required, as shown below.
+Note that this command is the only one of the Core Admin API commands that *does not* support the `core` parameter.
+Instead, the `name` parameter is required, as shown below.
 
-.CREATE must be able to find a configuration!
-[WARNING]
-====
-Your CREATE call must be able to find a configuration, or it will not succeed.
+Note that CREATE must be able to find a configuration or it will not succeed.
 
-When you are running SolrCloud and create a new core for a collection, the configuration will be inherited from the collection. Each collection is linked to a configName, which is stored in ZooKeeper. This satisfies the configuration requirement. There is something to note, though: if you're running SolrCloud, you should *NOT* use the CoreAdmin API at all. Use the <<collections-api.adoc#,Collections API>>.
+When you are running SolrCloud and create a new core for a collection, the configuration will be inherited from the collection.
+Each collection is linked to a `configName`, which is stored in ZooKeeper.
+This satisfies the configuration requirement.
+That said, if you're running SolrCloud, you should *NOT* use the CoreAdmin API at all.
+Instead, use the <<collections-api.adoc#,Collections API>>.
 
-When you are not running SolrCloud, if you have <<config-sets.adoc#,Configsets>> defined, you can use the `configSet` parameter as documented below. If there are no configsets, then the `instanceDir` specified in the CREATE call must already exist, and it must contain a `conf` directory which in turn must contain `solrconfig.xml`, your schema (usually named either `managed-schema` or `schema.xml`), and any files referenced by those configs.
+With a user-managed cluster, if you have <<config-sets.adoc#,Configsets>> defined, you can use the `configSet` parameter as documented below.
+If there are no configsets, then the `instanceDir` specified in the CREATE call must already exist, and it must contain a `conf` directory which in turn must contain `solrconfig.xml`, your schema (usually named either `managed-schema` or `schema.xml`), and any files referenced by those configs.
 
-The config and schema filenames can be specified with the `config` and `schema` parameters, but these are expert options. One thing you could do to avoid creating the `conf` directory is use `config` and `schema` parameters that point at absolute paths, but this can lead to confusing configurations unless you fully understand what you are doing.
-====
+The config and schema filenames can be specified with the `config` and `schema` parameters, but these are expert options.
+One thing you could do to avoid creating the `conf` directory is use `config` and `schema` parameters that point at absolute paths, but this can lead to confusing configurations unless you fully understand what you are doing.
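+
+As an illustration of the configset route described above (the core name is hypothetical; `_default` is the configset that ships with Solr):
+
+[source,bash]
+----
+curl "http://localhost:8983/solr/admin/cores?action=CREATE&name=my_core&configSet=_default"
+----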
 
 .CREATE and the `core.properties` file
 [IMPORTANT]
 ====
-The `core.properties` file is built as part of the CREATE command. If you create a `core.properties` file yourself in a core directory and then try to use CREATE to add that core to Solr, you will get an error telling you that another core is already defined there. The `core.properties` file must NOT exist before calling the CoreAdmin API with the CREATE command.
+The `core.properties` file is built as part of the CREATE command.
+If you create a `core.properties` file yourself in a core directory and then try to use CREATE to add that core to Solr, you will get an error telling you that another core is already defined there.
+The `core.properties` file must NOT exist before calling the CoreAdmin API with the CREATE command.
 ====
 
 === CREATE Core Parameters
 
 `name`::
-The name of the new core. Same as `name` on the `<core>` element. This parameter is required.
+The name of the new core.
+Same as `name` on the `<core>` element.
+This parameter is required.
 
 `instanceDir`::
-The directory where files for this core should be stored. Same as `instanceDir` on the `<core>` element. The default is the value specified for the `name` parameter if not supplied. This directory must be inside `SOLR_HOME`, `SOLR_DATA_HOME` or one of the paths specified by system property `solr.allowPaths`.
+The directory where files for this core should be stored.
+Same as `instanceDir` on the `<core>` element.
+The default is the value specified for the `name` parameter if not supplied.
+This directory must be inside `SOLR_HOME`, `SOLR_DATA_HOME` or one of the paths specified by system property `solr.allowPaths`.
 
 `config`::
 Name of the config file (i.e., `solrconfig.xml`) relative to `instanceDir`.
 
 `schema`::
-Name of the schema file to use for the core. Please note that if you are using a "managed schema" (the default behavior) then any value for this property which does not match the effective `managedSchemaResourceName` will be read once, backed up, and converted for managed schema use. See <<schema-factory-definition-in-solrconfig.adoc#,Schema Factory Definition in SolrConfig>> for details.
+Name of the schema file to use for the core.
+Please note that if you are using a "managed schema" (the default behavior) then any value for this property which does not match the effective `managedSchemaResourceName` will be read once, backed up, and converted for managed schema use.
+See <<schema-factory.adoc#,Schema Factory Definition in SolrConfig>> for details.
 
 `dataDir`::
-Name of the data directory relative to `instanceDir`. If absolute value is used, it must be inside `SOLR_HOME`, `SOLR_DATA_HOME` or one of the paths specified by system property `solr.allowPaths`.
+Name of the data directory relative to `instanceDir`.
+If an absolute value is used, it must be inside `SOLR_HOME`, `SOLR_DATA_HOME` or one of the paths specified by system property `solr.allowPaths`.
 
 `configSet`::
-Name of the configset to use for this core. For more information, see the section <<config-sets.adoc#,Configsets>>.
+Name of the configset to use for this core.
+For more information, see the section <<config-sets.adoc#,Configsets>>.
 
 `collection`::
-The name of the collection to which this core belongs. The default is the name of the core. `collection._param_=_value_` causes a property of `_param_=_value_` to be set if a new collection is being created. Use `collection.configName=_config-name_` to point to the configuration for a new collection.
+The name of the collection to which this core belongs.
+The default is the name of the core.
+`collection._param_=_value_` causes a property of `_param_=_value_` to be set if a new collection is being created.
+Use `collection.configName=_config-name_` to point to the configuration for a new collection.
 +
-WARNING: While it's possible to create a core for a non-existent collection, this approach is not supported and not recommended. Always create a collection using the <<collections-api.adoc#,Collections API>> before creating a core directly for it.
+WARNING: While it's possible to create a core for a non-existent collection, this approach is not supported and not recommended.
+Always create a collection using the <<collections-api.adoc#,Collections API>> before creating a core directly for it.
 
 `shard`::
-The shard id this core represents. Normally you want to be auto-assigned a shard id.
+The shard ID this core represents.
+This should only be required in special circumstances; normally you want to be auto-assigned a shard ID.
 
 `property._name_=_value_`::
-Sets the core property _name_ to _value_. See the section on defining <<defining-core-properties.adoc#defining-core-properties-files,core.properties file contents>>.
+Sets the core property _name_ to _value_.
+See the section on defining <<core-discovery.adoc#defining-core-properties-files,core.properties file contents>>.
 
 `async`::
 Request ID to track this action which will be processed asynchronously.
@@ -199,7 +224,9 @@ http://localhost:8983/solr/admin/cores?action=CREATE&name=my_core&collection=my_
 [[coreadmin-reload]]
 == RELOAD
 
-The RELOAD action loads a new core from the configuration of an existing, registered Solr core. While the new core is initializing, the existing one will continue to handle requests. When the new Solr core is ready, it takes over and the old core is unloaded.
+The RELOAD action loads a new core from the configuration of an existing, registered Solr core.
+While the new core is initializing, the existing one will continue to handle requests.
+When the new Solr core is ready, it takes over and the old core is unloaded.
 
 [.dynamic-tabs]
 --
@@ -229,17 +256,20 @@ curl -X POST http://localhost:8983/api/cores/techproducts -H 'Content-Type: appl
 ====
 --
 
-This is useful when you've made changes to a Solr core's configuration on disk, such as adding new field definitions. Calling the RELOAD action lets you apply the new configuration without having to restart Solr.
+This is useful when you've made changes to a Solr core's configuration on disk, such as adding new field definitions.
+Calling the RELOAD action lets you apply the new configuration without having to restart Solr.
 
 [IMPORTANT]
 ====
-RELOAD performs "live" reloads of SolrCore, reusing some existing objects. Some configuration options, such as the `dataDir` location and `IndexWriter`-related settings in `solrconfig.xml` can not be changed and made active with a simple RELOAD action.
+RELOAD performs "live" reloads of SolrCore, reusing some existing objects.
+Some configuration options, such as the `dataDir` location and `IndexWriter`-related settings in `solrconfig.xml`, cannot be changed and made active with a simple RELOAD action.
 ====
 
 === RELOAD Core Parameters
 
 `core`::
-The name of the core, as listed in the "name" attribute of a `<core>` element in `solr.xml`. This parameter is required in v1, and part of the url in the v2 API.
+The name of the core, as listed in the "name" attribute of a `<core>` element in `solr.xml`.
+This parameter is required in v1, and part of the URL in the v2 API.
 
 [[coreadmin-rename]]
 == RENAME
@@ -251,10 +281,13 @@ The `RENAME` action changes the name of a Solr core.
 === RENAME Parameters
 
 `core`::
-The name of the Solr core to be renamed. This parameter is required.
+The name of the Solr core to be renamed.
+This parameter is required.
 
 `other`::
-The new name for the Solr core. If the persistent attribute of `<solr>` is `true`, the new name will be written to `solr.xml` as the `name` attribute of the `<core>` attribute. This parameter is required.
+The new name for the Solr core.
+If the persistent attribute of `<solr>` is `true`, the new name will be written to `solr.xml` as the `name` attribute of the `<core>` element.
+This parameter is required.
 
 `async`::
 Request ID to track this action which will be processed asynchronously.
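
 For example, a hypothetical call renaming a core from `core_a` to `core_b` (both names are illustrative only):

 [source,bash]
 ----
 curl "http://localhost:8983/solr/admin/cores?action=RENAME&core=core_a&other=core_b"
 ----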
@@ -263,22 +296,28 @@ Request ID to track this action which will be processed asynchronously.
 [[coreadmin-swap]]
 == SWAP
 
-`SWAP` atomically swaps the names used to access two existing Solr cores. This can be used to swap new content into production. The prior core remains available and can be swapped back, if necessary. Each core will be known by the name of the other, after the swap.
+`SWAP` atomically swaps the names used to access two existing Solr cores.
+This can be used to swap new content into production.
+The prior core remains available and can be swapped back, if necessary.
+Each core will be known by the name of the other, after the swap.
 
 `admin/cores?action=SWAP&core=_core-name_&other=_other-core-name_`
 
 [IMPORTANT]
 ====
-Do not use `SWAP` with a SolrCloud node. It is not supported and can result in the core being unusable.
+Do not use `SWAP` with a SolrCloud node.
+It is not supported and can result in the core being unusable.
 ====
 
 === SWAP Parameters
 
 `core`::
-The name of one of the cores to be swapped. This parameter is required.
+The name of one of the cores to be swapped.
+This parameter is required.
 
 `other`::
-The name of one of the cores to be swapped. This parameter is required.
+The name of one of the cores to be swapped.
+This parameter is required.
 
 `async`::
 Request ID to track this action which will be processed asynchronously.
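
 For example, a hypothetical call that swaps a freshly rebuilt core into the place of the live one (core names are illustrative):

 [source,bash]
 ----
 curl "http://localhost:8983/solr/admin/cores?action=SWAP&core=live_core&other=rebuild_core"
 ----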
@@ -287,11 +326,14 @@ Request ID to track this action which will be processed asynchronously.
 [[coreadmin-unload]]
 == UNLOAD
 
-The `UNLOAD` action removes a core from Solr. Active requests will continue to be processed, but no new requests will be sent to the named core. If a core is registered under more than one name, only the given name is removed.
+The `UNLOAD` action removes a core from Solr.
+Active requests will continue to be processed, but no new requests will be sent to the named core.
+If a core is registered under more than one name, only the given name is removed.
 
 `admin/cores?action=UNLOAD&core=_core-name_`
 
-The `UNLOAD` action requires a parameter (`core`) identifying the core to be removed. If the persistent attribute of `<solr>` is set to `true`, the `<core>` element with this `name` attribute will be removed from `solr.xml`.
+The `UNLOAD` action requires a parameter (`core`) identifying the core to be removed.
+If the persistent attribute of `<solr>` is set to `true`, the `<core>` element with this `name` attribute will be removed from `solr.xml`.
 
 [IMPORTANT]
 ====
@@ -301,16 +343,20 @@ Unloading all cores in a SolrCloud collection causes the removal of that collect
 === UNLOAD Parameters
 
 `core`::
-The name of a core to be removed. This parameter is required.
+The name of a core to be removed.
+This parameter is required.
 
 `deleteIndex`::
-If `true`, will remove the index when unloading the core. The default is `false`.
+If `true`, will remove the index when unloading the core.
+The default is `false`.
 
 `deleteDataDir`::
-If `true`, removes the `data` directory and all sub-directories. The default is `false`.
+If `true`, removes the `data` directory and all sub-directories.
+The default is `false`.
 
 `deleteInstanceDir`::
-If `true`, removes everything related to the core, including the index directory, configuration files and other related files. The default is `false`.
+If `true`, removes everything related to the core, including the index directory, configuration files and other related files.
+The default is `false`.
 
 `async`::
 Request ID to track this action which will be processed asynchronously.
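
 For example, a hypothetical call that unloads a core and deletes its index (the core name is illustrative):

 [source,bash]
 ----
 curl "http://localhost:8983/solr/admin/cores?action=UNLOAD&core=my_core&deleteIndex=true"
 ----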
@@ -318,24 +364,33 @@ Request ID to track this action which will be processed asynchronously.
 [[coreadmin-mergeindexes]]
 == MERGEINDEXES
 
-The `MERGEINDEXES` action merges one or more indexes to another index. The indexes must have completed commits, and should be locked against writes until the merge is complete or the resulting merged index may become corrupted. The target core index must already exist and have a compatible schema with the one or more indexes that will be merged to it. Another commit on the target core should also be performed after the merge is complete.
+The `MERGEINDEXES` action merges one or more indexes to another index.
+The indexes must have completed commits, and should be locked against writes until the merge is complete or the resulting merged index may become corrupted.
+The target core index must already exist and have a compatible schema with the one or more indexes that will be merged to it.
+Another commit on the target core should also be performed after the merge is complete.
 
 `admin/cores?action=MERGEINDEXES&core=_new-core-name_&indexDir=_path/to/core1/data/index_&indexDir=_path/to/core2/data/index_`
 
-In this example, we use the `indexDir` parameter to define the index locations of the source cores. The `core` parameter defines the target index. A benefit of this approach is that we can merge any Lucene-based index that may not be associated with a Solr core.
+In this example, we use the `indexDir` parameter to define the index locations of the source cores.
+The `core` parameter defines the target index.
+A benefit of this approach is that we can merge any Lucene-based index that may not be associated with a Solr core.
 
 Alternatively, we can instead use a `srcCore` parameter, as in this example:
 
 `admin/cores?action=mergeindexes&core=_new-core-name_&srcCore=_core1-name_&srcCore=_core2-name_`
 
-This approach allows us to define cores that may not have an index path that is on the same physical server as the target core. However, we can only use Solr cores as the source indexes. Another benefit of this approach is that we don't have as high a risk for corruption if writes occur in parallel with the source index.
+This approach allows us to define cores that may not have an index path that is on the same physical server as the target core.
+However, we can only use Solr cores as the source indexes.
+Another benefit of this approach is that we don't have as high a risk for corruption if writes occur in parallel with the source index.
 
-We can make this call run asynchronously by specifying the `async` parameter and passing a request-id. This id can then be used to check the status of the already submitted task using the REQUESTSTATUS API.
+We can make this call run asynchronously by specifying the `async` parameter and passing a request ID.
+This ID can then be used to check the status of the already submitted task using the REQUESTSTATUS API.
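+
+A sketch of that flow, with hypothetical core names and request ID:
+
+[source,bash]
+----
+# submit the merge asynchronously with the request ID "merge-001"
+curl "http://localhost:8983/solr/admin/cores?action=MERGEINDEXES&core=target_core&srcCore=source_core1&srcCore=source_core2&async=merge-001"
+# poll for its status later
+curl "http://localhost:8983/solr/admin/cores?action=REQUESTSTATUS&requestid=merge-001"
+----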
 
 === MERGEINDEXES Parameters
 
 `core`::
-The name of the target core/index. This parameter is required.
+The name of the target core/index.
+This parameter is required.
 
 `indexDir`::
 Multi-valued, directories that would be merged.
@@ -350,26 +405,37 @@ Request ID to track this action which will be processed asynchronously.
 [[coreadmin-split]]
 == SPLIT
 
-The `SPLIT` action splits an index into two or more indexes. The index being split can continue to handle requests. The split pieces can be placed into a specified directory on the server's filesystem or it can be merged into running Solr cores.
+The `SPLIT` action splits an index into two or more indexes.
+The index being split can continue to handle requests.
+The split pieces can be placed into a specified directory on the server's filesystem or they can be merged into running Solr cores.
 
 The `SPLIT` action supports five parameters, which are described in the table below.
 
 === SPLIT Parameters
 
 `core`::
-The name of the core to be split. This parameter is required.
+The name of the core to be split.
+This parameter is required.
 
 `path`::
-Multi-valued, the directory path in which a piece of the index will be written. Either this parameter or `targetCore` must be specified. If this is specified, the `targetCore` parameter may not be used.
+Multi-valued, the directory path in which a piece of the index will be written.
+Either this parameter or `targetCore` must be specified.
+If this is specified, the `targetCore` parameter may not be used.
 
 `targetCore`::
-Multi-valued, the target Solr core to which a piece of the index will be merged. Either this parameter or `path` must be specified. If this is specified, the `path` parameter may not be used.
+Multi-valued, the target Solr core to which a piece of the index will be merged.
+Either this parameter or `path` must be specified.
+If this is specified, the `path` parameter may not be used.
 
 `ranges`::
-A comma-separated list of hash ranges in hexadecimal format. If this parameter is used, `split.key` should not be. See the <<SPLIT Examples>> below for an example of how this parameter can be used.
+A comma-separated list of hash ranges in hexadecimal format.
+If this parameter is used, `split.key` should not be.
+See the <<SPLIT Examples>> below for an example of how this parameter can be used.
 
 `split.key`::
-The key to be used for splitting the index. If this parameter is used, `ranges` should not be. See the <<SPLIT Examples>> below for an example of how this parameter can be used.
+The key to be used for splitting the index.
+If this parameter is used, `ranges` should not be.
+See the <<SPLIT Examples>> below for an example of how this parameter can be used.
 
 `async`::
 Request ID to track this action which will be processed asynchronously.
@@ -397,18 +463,24 @@ The `core` index will be split into two pieces and written into the two director
 [source,bash]
 http://localhost:8983/solr/admin/cores?action=SPLIT&core=core0&targetCore=core1&split.key=A!
 
-Here all documents having the same route key as the `split.key` i.e., 'A!' will be split from the `core` index and written to the `targetCore`.
+Here all documents having the same route key as the `split.key` i.e., `A!` will be split from the `core` index and written to the `targetCore`.
 
 *Usage with ranges parameter*:
 
 [source,bash]
 http://localhost:8983/solr/admin/cores?action=SPLIT&core=core0&targetCore=core1&targetCore=core2&targetCore=core3&ranges=0-1f4,1f5-3e8,3e9-5dc
 
-This example uses the `ranges` parameter with hash ranges 0-500, 501-1000 and 1001-1500 specified in hexadecimal. Here the index will be split into three pieces with each targetCore receiving documents matching the hash ranges specified i.e., core1 will get documents with hash range 0-500, core2 will receive documents with hash range 501-1000 and finally, core3 will receive documents with hash range 1001-1500. At least one hash range must be specified. Please note that using a single has [...]
+This example uses the `ranges` parameter with hash ranges 0-500, 501-1000 and 1001-1500 specified in hexadecimal.
+Here the index will be split into three pieces with each targetCore receiving documents matching the hash ranges specified i.e., core1 will get documents with hash range 0-500, core2 will receive documents with hash range 501-1000 and finally, core3 will receive documents with hash range 1001-1500.
+At least one hash range must be specified.
+Please note that using a single hash range equal to a route key's hash range is NOT equivalent to using the `split.key` parameter because multiple route keys can hash to the same range.
 
-The `targetCore` must already exist and must have a compatible schema with the `core` index. A commit is automatically called on the `core` index before it is split.
+The `targetCore` must already exist and must have a compatible schema with the `core` index.
+A commit is automatically called on the `core` index before it is split.
 
-This command is used as part of the <<shard-management.adoc#splitshard,SPLITSHARD>> command but it can be used for non-cloud Solr cores as well. When used against a non-cloud core without `split.key` parameter, this action will split the source index and distribute its documents alternately so that each split piece contains an equal number of documents. If the `split.key` parameter is specified then only documents having the same route key will be split from the source index.
+This command is used as part of SolrCloud's <<shard-management.adoc#splitshard,SPLITSHARD>> command but it can be used for cores in user-managed clusters as well.
+When used against a core in a user-managed cluster without the `split.key` parameter, this action will split the source index and distribute its documents alternately so that each split piece contains an equal number of documents.
+If the `split.key` parameter is specified then only documents having the same route key will be split from the source index.
 
 [[coreadmin-requeststatus]]
 == REQUESTSTATUS
@@ -422,7 +494,8 @@ Request the status of an already submitted asynchronous CoreAdmin API call.
 The REQUESTSTATUS command has only one parameter.
 
 `requestid`::
-The user defined request-id for the asynchronous request. This parameter is required.
+The user-defined request ID for the asynchronous request.
+This parameter is required.
 
 The call below will return the status of an already submitted asynchronous CoreAdmin call.
 
@@ -432,14 +505,16 @@ http://localhost:8983/solr/admin/cores?action=REQUESTSTATUS&requestid=1
 [[coreadmin-requestrecovery]]
 == REQUESTRECOVERY
 
-The `REQUESTRECOVERY` action manually asks a core to recover by synching with the leader. This should be considered an "expert" level command and should be used in situations where the node (SorlCloud replica) is unable to become active automatically.
+The `REQUESTRECOVERY` action manually asks a core to recover by synching with the leader.
+This should be considered an "expert" level command and should be used in situations where the node (SolrCloud replica) is unable to become active automatically.
 
 `admin/cores?action=REQUESTRECOVERY&core=_core-name_`
 
 === REQUESTRECOVERY Parameters
 
 `core`::
-The name of the core to re-sync. This parameter is required.
+The name of the core to re-sync.
+This parameter is required.
 
 === REQUESTRECOVERY Examples
 
diff --git a/solr/solr-ref-guide/src/css/decoration.css b/solr/solr-ref-guide/src/css/decoration.css
index ff3a3e2..b9bc194 100644
--- a/solr/solr-ref-guide/src/css/decoration.css
+++ b/solr/solr-ref-guide/src/css/decoration.css
@@ -21,11 +21,21 @@
     border-color: #CDCECF;
 }
 
-/* used for Download Solr button on docs home page */
-.btn-home {
-  color: #ffffff;
-  background-color: #F35B38;
-  border-color: #E6E7E8;
+/* used for query parser labels in query-parameters.adoc */
+
+.badge-lucene {
+  background-color: #6F8061;
+  color: white;
+}
+
+.badge-dismax {
+  background-color: #305CB3;
+  color: white;
+}
+
+.badge-edismax {
+  background-color: #FF833D;
+  color: white;
 }
 
 input
@@ -240,15 +250,39 @@ hr {
 }
 /** === END HORIZONTAL RULES === **/
 
-/** === ANCHORJS === **/
-/* This is the permalink icon after section headings */
-
-.anchorjs-link:hover {
-    color: #216f9b;
-}
-
-*:hover > .anchorjs-link {
-    transition: color .25s linear;
-    text-decoration: none;
-}
-/** === END ANCHORJS === **/
+/** === SECTION ANCHOR LINKS === **/
+
+h1 .anchor::before,
+h2 .anchor::before,
+h3 .anchor::before,
+h4 .anchor::before,
+h5 .anchor::before,
+h6 .anchor::before {
+  content: "\00a7";
+}
+
+h1 .anchor,
+h2 .anchor,
+h3 .anchor,
+h4 .anchor,
+h5 .anchor,
+h6 .anchor {
+  position: absolute;
+  text-decoration: none;
+  width: 1.75ex;
+  margin-left: -1ex;
+  visibility: hidden;
+  font-size: .8em;
+  font-weight: 400;
+  padding-top: .15em;
+}
+
+h1:hover .anchor,
+h2:hover .anchor,
+h3:hover .anchor,
+h4:hover .anchor,
+h5:hover .anchor,
+h6:hover .anchor {
+  visibility: visible;
+}
+/** === END SECTION ANCHOR LINKS === **/
diff --git a/solr/solr-ref-guide/src/css/navs.css b/solr/solr-ref-guide/src/css/navs.css
index 204d52a..1326245 100644
--- a/solr/solr-ref-guide/src/css/navs.css
+++ b/solr/solr-ref-guide/src/css/navs.css
@@ -175,10 +175,10 @@ ul.nav li ul {
 }
 
 .nav li > a > span:after {
-    content: '\25be';
+    content: '\002b';
 }
 .nav li.active > a > span:after {
-    content: '\25b4';
+    content: '\2212';
 }
 
 
diff --git a/solr/solr-ref-guide/src/css/ref-guide.css b/solr/solr-ref-guide/src/css/ref-guide.css
index 9037823..7d7530c 100644
--- a/solr/solr-ref-guide/src/css/ref-guide.css
+++ b/solr/solr-ref-guide/src/css/ref-guide.css
@@ -151,16 +151,6 @@ i
   margin: 50px 10px 20px 10px;
 }
 
-/* Special rules for homepage (index.html) so rows are all the same height */
-.row.match-my-cols {
-    display: table;
-}
-
-.row.match-my-cols [class*="col-"] {
-    float: none;
-    display: table-cell;
-    vertical-align: top;
-}
 
 /* General text paragraphs */
 
@@ -314,6 +304,14 @@ dt[id]{
    margin-top: -40px
 }
 
+/* used on scaling-solr for discrete headings in the sidebar block */
+h3[id].discrete,
+h4[id].discrete {
+  padding-top: 20px;
+  padding-bottom: 10px;
+  margin-top: -20px;
+}
+
 .sidebarblock > .title small,
 h1 small,
 h2 small,
@@ -720,7 +718,11 @@ ol.lowergreek
 
 dl dd
 {
-    margin-left: 1.125em;
+    margin-left: 20px;
+}
+
+dl dd dl {
+  margin-left: 30px;
 }
 
 dl dd:last-child,
@@ -976,16 +978,33 @@ table thead tr th
 table tr td,
 table tr th
 {
-    padding: .5625em .625em;
+    padding: 9px 10px;
     color: rgba(0,0,0,.8);
     font-size: inherit;
 }
 
-table tr.alt,
+/** Used on some tables to tighten up space between cells
+    Add [.lowpadding] as a role to the table to use this **/
+.tableblock.lowpadding table tr td,
+.tableblock.lowpadding table tr th
+{
+    padding: 2px 3px;
+    color: rgba(0,0,0,.8);
+    font-size: inherit;
+}
+
+/** table tr.alt,
 table tr.even,
 table tr:nth-of-type(even)
 {
     background: #f8f8f7;
+} **/
+
+table.stripes-all tr,
+table.stripes-odd tr:nth-of-type(odd),
+table.stripes-even tr:nth-of-type(even),
+table.stripes-hover tr:hover {
+  background:#f8f8f7
 }
 
 table tbody tr td,
@@ -1050,7 +1069,7 @@ table.tableblock th > p:last-child
     margin-bottom: 0;
 }
 
-table.spread
+.stretch table.tableblock
 {
     width: 100%;
 }
@@ -1165,6 +1184,17 @@ table thead th
     font-weight: bold;
 }
 
+/* Special formatting for tables on index.adoc home page */
+.home-table table th,
+.home-table table th a {
+    color: #D9411E;
+    font-size: 1.125em;
+}
+
+.home-table table thead {
+    background-color: #fff;
+}
+
 tbody tr th
 {
     display: table-cell;
diff --git a/solr/solr-ref-guide/src/working-with-currencies-and-exchange-rates.adoc b/solr/solr-ref-guide/src/currencies-exchange-rates.adoc
similarity index 72%
rename from solr/solr-ref-guide/src/working-with-currencies-and-exchange-rates.adoc
rename to solr/solr-ref-guide/src/currencies-exchange-rates.adoc
index ba7c39a..20eaeed 100644
--- a/solr/solr-ref-guide/src/working-with-currencies-and-exchange-rates.adoc
+++ b/solr/solr-ref-guide/src/currencies-exchange-rates.adoc
@@ -1,4 +1,4 @@
-= Working with Currencies and Exchange Rates
+= Currencies and Exchange Rates
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -16,7 +16,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The `currency` FieldType provides support for monetary values to Solr/Lucene with query-time currency conversion and exchange rates. The following features are supported:
+The `currency` FieldType provides support for monetary values to Solr/Lucene with query-time currency conversion and exchange rates.
+The following features are supported:
 
 * Point queries
 * Range queries
@@ -34,7 +35,8 @@ The `currency` FieldType provides support for monetary values to Solr/Lucene wit
 CurrencyField has been deprecated in favor of CurrencyFieldType; all configuration examples below use CurrencyFieldType.
 ====
 
-The `currency` field type is defined in `schema.xml`. This is the default configuration of this type.
+The `currency` field type is defined in `schema.xml`.
+This is the default configuration of this type.
 
 [source,xml]
 ----
@@ -43,7 +45,10 @@ The `currency` field type is defined in `schema.xml`. This is the default config
            defaultCurrency="USD" currencyConfig="currency.xml" />
 ----
 
-In this example, we have defined the name and class of the field type, and defined the `defaultCurrency` as "USD", for U.S. Dollars. We have also defined a `currencyConfig` to use a file called "currency.xml". This is a file of exchange rates between our default currency to other currencies. There is an alternate implementation that would allow regular downloading of currency data. See <<Exchange Rates>> below for more.
+In this example, we have defined the name and class of the field type, and defined the `defaultCurrency` as "USD", for U.S. Dollars.
+We have also defined a `currencyConfig` to use a file called "currency.xml".
+This is a file of exchange rates between our default currency and other currencies.
+There is an alternate implementation that would allow regular downloading of currency data.
+See <<Exchange Rates>> below for more.
 
 Many of the example schemas that ship with Solr include a <<dynamic-fields.adoc#,dynamic field>> that uses this type, such as this example:
 
@@ -54,7 +59,9 @@ Many of the example schemas that ship with Solr include a <<dynamic-fields.adoc#
 
 This dynamic field would match any field that ends in `_c` and make it a currency typed field.
 
-At indexing time, money fields can be indexed in a native currency. For example, if a product on an e-commerce site is listed in Euros, indexing the price field as "1000,EUR" will index it appropriately. The price should be separated from the currency by a comma, and the price must be encoded with a floating point value (a decimal point).
+At indexing time, money fields can be indexed in a native currency.
+For example, if a product on an e-commerce site is listed in Euros, indexing the price field as "1000,EUR" will index it appropriately.
+The price should be separated from the currency by a comma, and the price must be encoded with a floating point value (a decimal point).
 
 During query processing, range and point queries are both supported.
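
 As a hedged sketch (assuming the `*_c` dynamic field shown above and a hypothetical core named `my_core`), indexing a price in Euros and then querying it in USD might look like this:

 [source,bash]
 ----
 # index a document whose price is stored in Euros
 curl -X POST -H 'Content-type:application/json' \
   'http://localhost:8983/solr/my_core/update?commit=true' \
   --data-binary '[{"id": "prod-1", "price_c": "1000,EUR"}]'

 # range query expressed in USD; conversion happens at query time (-g keeps curl from globbing the brackets)
 curl -g 'http://localhost:8983/solr/my_core/select?q=price_c:[10.00,USD%20TO%202000.00,USD]'
 ----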
 
@@ -69,23 +76,28 @@ You must specify parameters `amountLongSuffix` and `codeStrSuffix`, correspondin
            defaultCurrency="USD" currencyConfig="currency.xml" />
 ----
 
-In the above example, the raw amount field will use the `"*_l_ns"` dynamic field, which must exist in the schema and use a long field type, i.e., one that extends `LongValueFieldType`.  The currency code field will use the `"*_s_ns"` dynamic field, which must exist in the schema and use a string field type, i.e., one that is or extends `StrField`.
+In the above example, the raw amount field will use the `"*_l_ns"` dynamic field, which must exist in the schema and use a long field type, i.e., one that extends `LongValueFieldType`.
+The currency code field will use the `"*_s_ns"` dynamic field, which must exist in the schema and use a string field type, i.e., one that is or extends `StrField`.
 
 .Atomic Updates won't work if dynamic sub-fields are stored
 [NOTE]
 ====
-As noted on <<updating-parts-of-documents.adoc#field-storage,Updating Parts of Documents>>, stored dynamic sub-fields will cause indexing to fail when you use Atomic Updates. To avoid this problem, specify `stored="false"` on those dynamic fields.
+As noted in <<partial-document-updates.adoc#field-storage,Atomic Update Field Storage>>, stored dynamic sub-fields will cause indexing to fail when you use Atomic Updates.
+To avoid this problem, specify `stored="false"` on those dynamic fields.
 ====
 
 == Exchange Rates
 
-You configure exchange rates by specifying a provider. Natively, two provider types are supported: `FileExchangeRateProvider` or `OpenExchangeRatesOrgProvider`.
+You configure exchange rates by specifying a provider.
+Natively, two provider types are supported: `FileExchangeRateProvider` or `OpenExchangeRatesOrgProvider`.
 
 === FileExchangeRateProvider
 
-This provider requires you to provide a file of exchange rates. It is the default, meaning that to use this provider you only need to specify the file path and name as a value for `currencyConfig` in the definition for this type.
+This provider requires a file of exchange rates.
+It is the default, meaning that to use this provider you only need to specify the file path and name as a value for `currencyConfig` in the definition for this type.
 
-There is a sample `currency.xml` file included with Solr, found in the same directory as the `schema.xml` file. Here is a small snippet from this file:
+There is a sample `currency.xml` file included with Solr, found in the same directory as the `schema.xml` file.
+Here is a small snippet from this file:
 
 [source,xml]
 ----
@@ -110,9 +122,11 @@ There is a sample `currency.xml` file included with Solr, found in the same dire
 
 === OpenExchangeRatesOrgProvider
 
-You can configure Solr to download exchange rates from http://www.OpenExchangeRates.Org[OpenExchangeRates.Org], with updates rates between USD and 170 currencies hourly. These rates are symmetrical only.
+You can configure Solr to download exchange rates from http://www.OpenExchangeRates.Org[OpenExchangeRates.Org], which updates rates between USD and 170 currencies hourly.
+These rates are symmetrical only.
 
-In this case, you need to specify the `providerClass` in the definitions for the field type and sign up for an API key. Here is an example:
+In this case, you need to specify the `providerClass` in the definitions for the field type and sign up for an API key.
+Here is an example:
 
 [source,xml]
 ----
@@ -123,4 +137,5 @@ In this case, you need to specify the `providerClass` in the definitions for the
            ratesFileLocation="http://www.openexchangerates.org/api/latest.json?app_id=yourPersonalAppIdKey"/>
 ----
 
-The `refreshInterval` is minutes, so the above example will download the newest rates every 60 minutes. The refresh interval may be increased, but not decreased.
+The `refreshInterval` is specified in minutes, so the above example will download the newest rates every 60 minutes.
+The refresh interval may be increased, but not decreased.
diff --git a/solr/solr-ref-guide/src/working-with-dates.adoc b/solr/solr-ref-guide/src/date-formatting-math.adoc
similarity index 70%
rename from solr/solr-ref-guide/src/working-with-dates.adoc
rename to solr/solr-ref-guide/src/date-formatting-math.adoc
index 52d914b..8bd66c2 100644
--- a/solr/solr-ref-guide/src/working-with-dates.adoc
+++ b/solr/solr-ref-guide/src/date-formatting-math.adoc
@@ -1,4 +1,4 @@
-= Working with Dates
+= Date Formatting and Date Math
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -18,7 +18,8 @@
 
 == Date Formatting
 
-Solr's date fields (`DatePointField`, `DateRangeField` and the deprecated `TrieDateField`) represent "dates" as a point in time with millisecond precision. The format used is a restricted form of the canonical representation of dateTime in the http://www.w3.org/TR/xmlschema-2/#dateTime[XML Schema specification] – a restricted subset of https://en.wikipedia.org/wiki/ISO_8601[ISO-8601]. For those familiar with Java date handling, Solr uses {java-javadocs}java/time/format/DateTimeFormatter. [...]
+Solr's date fields (`DatePointField`, `DateRangeField` and the deprecated `TrieDateField`) represent "dates" as a point in time with millisecond precision.
+The format used is a restricted form of the canonical representation of dateTime in the http://www.w3.org/TR/xmlschema-2/#dateTime[XML Schema specification] – a restricted subset of https://en.wikipedia.org/wiki/ISO_8601[ISO-8601].
+For those familiar with Java date handling, Solr uses {java-javadocs}java/time/format/DateTimeFormatter.html#ISO_INSTANT[DateTimeFormatter.ISO_INSTANT] for formatting, and for parsing (with "leniency").
 
 `YYYY-MM-DDThh:mm:ssZ`
 
@@ -34,7 +35,8 @@ Note that no time zone can be specified; the String representations of dates is
 
 `1972-05-20T17:33:18Z`
 
-You can optionally include fractional seconds if you wish, although any precision beyond milliseconds will be ignored. Here are example values with sub-seconds:
+You can optionally include fractional seconds if you wish, although any precision beyond milliseconds will be ignored.
+Here are example values with sub-seconds:
 
 * `1972-05-20T17:33:18.772Z`
 * `1972-05-20T17:33:18.77Z`
@@ -45,7 +47,8 @@ There must be a leading `'-'` for dates prior to year 0000, and Solr will format
 .Query escaping may be required
 [WARNING]
 ====
-As you can see, the date format includes colon characters separating the hours, minutes, and seconds. Because the colon is a special character to Solr's most common query parsers, escaping is sometimes required, depending on exactly what you are trying to do.
+As you can see, the date format includes colon characters separating the hours, minutes, and seconds.
+Because the colon is a special character to Solr's most common query parsers, escaping is sometimes required, depending on exactly what you are trying to do.
 
 This is normally an invalid query: `datefield:1972-05-20T17:33:18.772Z`
 
@@ -57,17 +60,22 @@ These are valid queries: +
 
 === Date Range Formatting
 
-Solr's `DateRangeField` supports the same point in time date syntax described above (with _date math_ described below) and more to express date ranges. One class of examples is truncated dates, which represent the entire date span to the precision indicated. The other class uses the range syntax (`[ TO ]`). Here are some examples:
+Solr's `DateRangeField` supports the same point in time date syntax described above (with _date math_ described below) and more to express date ranges.
+One class of examples is truncated dates, which represent the entire date span to the precision indicated.
+The other class uses the range syntax (`[ TO ]`).
+Here are some examples:
 
 * `2000-11` – The entire month of November, 2000.
 * `1605-11-05` – The Fifth of November.
 * `2000-11-05T13` – Likewise but for an hour of the day (1300 to before 1400, i.e., 1pm to 2pm).
-* `-0009` – The year 10 BC. A 0 in the year position is 0 AD, and is also considered 1 BC.
+* `-0009` – The year 10 BC.
+A 0 in the year position is 0 AD, and is also considered 1 BC.
 * `[2000-11-01 TO 2014-12-01]` – The specified date range at a day resolution.
 * `[2014 TO 2014-12-01]` – From the start of 2014 till the end of the first day of December.
 * `[* TO 2014-12-01]` – From the earliest representable time thru till the end of the day on 2014-12-01.
 
-Limitations: The range syntax doesn't support embedded date math. If you specify a date instance supported by DatePointField with date math truncating it, like `NOW/DAY`, you still get the first millisecond of that day, not the entire day's range. Exclusive ranges (using `{` & `}`) work in _queries_ but not for _indexing_ ranges.
+Limitations: The range syntax doesn't support embedded date math.
+If you specify a date instance supported by DatePointField with date math truncating it, like `NOW/DAY`, you still get the first millisecond of that day, not the entire day's range.
+Exclusive ranges (using `{` & `}`) work in _queries_ but not for _indexing_ ranges.
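+For example, an exclusive range in a _query_ (a sketch, assuming a `DateRangeField` named `dateRange`) could look like this:
+
+[source,text]
+----
+fq=dateRange:{2000-11-01 TO 2014-12-01}
+----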
 
 == Date Math
 
@@ -75,7 +83,8 @@ Solr's date field types also supports _date math_ expressions, which makes it ea
 
 === Date Math Syntax
 
-Date math expressions consist either adding some quantity of time in a specified unit, or rounding the current time by a specified unit. expressions can be chained and are evaluated left to right.
+Date math expressions consist of either adding some quantity of time in a specified unit, or rounding the current time by a specified unit.
+Expressions can be chained and are evaluated left to right.
 
 For example: this represents a point in time two months from now:
 
@@ -85,7 +94,8 @@ This is one day ago:
 
 `NOW-1DAY`
 
-A slash is used to indicate rounding. This represents the beginning of the current hour:
+A slash is used to indicate rounding.
+This represents the beginning of the current hour:
 
 `NOW/HOUR`
 
@@ -101,7 +111,8 @@ Note that while date math is most commonly used relative to `NOW` it can be appl
 
 ==== NOW
 
-The `NOW` parameter is used internally by Solr to ensure consistent date math expression parsing across multiple nodes in a distributed request. But it can be specified to instruct Solr to use an arbitrary moment in time (past or future) to override for all situations where the the special value of "```NOW```" would impact date math expressions.
+The `NOW` parameter is used internally by Solr to ensure consistent date math expression parsing across multiple nodes in a distributed request.
+But it can be specified to instruct Solr to use an arbitrary moment in time (past or future) to override for all situations where the special value of "```NOW```" would impact date math expressions.
 
 It must be specified as (long-valued) milliseconds since epoch.
 
@@ -153,9 +164,14 @@ http://localhost:8983/solr/my_collection/select?q=*:*&facet.range=my_date_field&
 
 == More DateRangeField Details
 
-`DateRangeField` is almost a drop-in replacement for places where `DatePointField` is used. The only difference is that Solr's XML or SolrJ response formats will expose the stored data as a String instead of a Date. The underlying index data for this field will be a bit larger. Queries that align to units of time a second on up should be faster than TrieDateField, especially if it's in UTC.
+`DateRangeField` is almost a drop-in replacement for places where `DatePointField` is used.
+The only difference is that Solr's XML or SolrJ response formats will expose the stored data as a String instead of a Date.
+The underlying index data for this field will be a bit larger.
+Queries that align to units of time of a second or coarser should be faster than `TrieDateField`, especially if it's in UTC.
 
-The main point of `DateRangeField`, as its name suggests, is to allow indexing date ranges. To do that, simply supply strings in the format shown above. It also supports specifying 3 different relational predicates between the indexed data, and the query range:
+The main point of `DateRangeField`, as its name suggests, is to allow indexing date ranges.
+To do that, simply supply strings in the format shown above.
+It also supports specifying 3 different relational predicates between the indexed data, and the query range:
 
 * `Intersects` (default)
 * `Contains`
@@ -168,6 +184,7 @@ You can specify the predicate by querying using the `op` local-params parameter
 fq={!field f=dateRange op=Contains}[2013 TO 2018]
 ----
 
-Unlike most local parameters, `op` is actually _not_ defined by any query parser (`field`), it is defined by the field type, in this case `DateRangeField`. In the above example, it would find documents with indexed ranges that _contain_ (or equals) the range 2013 thru 2018. Multi-valued overlapping indexed ranges in a document are effectively coalesced.
+Unlike most local params, `op` is actually _not_ defined by any query parser (`field`); it is defined by the field type, in this case `DateRangeField`.
+In the above example, it would find documents with indexed ranges that _contain_ (or equal) the range 2013 through 2018.
+Multi-valued overlapping indexed ranges in a document are effectively coalesced.
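+As an illustrative sketch (assuming a `DateRangeField` named `dateRange` and a collection named `my_collection`), a range could be indexed and then matched with the `Contains` predicate like this:
+
+[source,bash]
+----
+# Index a document whose dateRange field holds the range 2013 through 2018
+curl -X POST -H 'Content-Type: application/json' \
+  'http://localhost:8983/solr/my_collection/update?commit=true' \
+  -d '[{"id": "doc1", "dateRange": "[2013 TO 2018]"}]'
+
+# Match documents whose indexed range contains (or equals) 2014 through 2015
+curl 'http://localhost:8983/solr/my_collection/select' \
+  --data-urlencode 'q=*:*' \
+  --data-urlencode 'fq={!field f=dateRange op=Contains}[2014 TO 2015]'
+----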
 
 For a DateRangeField example use case, see https://cwiki.apache.org/confluence/display/solr/DateRangeField[Solr's community wiki].
diff --git a/solr/solr-ref-guide/src/de-duplication.adoc b/solr/solr-ref-guide/src/de-duplication.adoc
index 133cb78..893c0c7 100644
--- a/solr/solr-ref-guide/src/de-duplication.adoc
+++ b/solr/solr-ref-guide/src/de-duplication.adoc
@@ -18,11 +18,15 @@
 
 If duplicate, or near-duplicate documents are a concern in your index, de-duplication may be worth implementing.
 
-Preventing duplicate or near duplicate documents from entering an index or tagging documents with a signature/fingerprint for duplicate field collapsing can be efficiently achieved with a low collision or fuzzy hash algorithm. Solr natively supports de-duplication techniques of this type via the `Signature` class and allows for the easy addition of new hash/signature implementations. A Signature can be implemented in a few ways:
+Preventing duplicate or near duplicate documents from entering an index or tagging documents with a signature/fingerprint for duplicate field collapsing can be efficiently achieved with a low collision or fuzzy hash algorithm.
+Solr natively supports de-duplication techniques of this type via the `Signature` class and allows for the easy addition of new hash/signature implementations.
+A Signature can be implemented in a few ways:
 
 * MD5Signature: 128-bit hash used for exact duplicate detection.
-* Lookup3Signature: 64-bit hash used for exact duplicate detection. This is much faster than MD5 and smaller to index.
-* https://cwiki.apache.org/confluence/display/solr/TextProfileSignature[TextProfileSignature]: Fuzzy hashing implementation from Apache Nutch for near duplicate detection. It's tunable but works best on longer text.
+* Lookup3Signature: 64-bit hash used for exact duplicate detection.
+This is much faster than MD5 and smaller to index.
+* https://cwiki.apache.org/confluence/display/solr/TextProfileSignature[TextProfileSignature]: Fuzzy hashing implementation from Apache Nutch for near duplicate detection.
+It's tunable but works best on longer text.
 
 Other, more sophisticated algorithms for fuzzy/near hashing can be added later.
 
@@ -30,7 +34,8 @@ Other, more sophisticated algorithms for fuzzy/near hashing can be added later.
 ====
 Adding in the de-duplication process will change the `allowDups` setting so that it applies to an update term (with `signatureField` in this case) rather than the unique field Term.
 
-Of course the `signatureField` could be the unique field, but generally you want the unique field to be unique. When a document is added, a signature will automatically be generated and attached to the document in the specified `signatureField`.
+Of course the `signatureField` could be the unique field, but generally you want the unique field to be unique.
+When a document is added, a signature will automatically be generated and attached to the document in the specified `signatureField`.
 ====
 
 == Configuration Options
@@ -57,25 +62,31 @@ The `SignatureUpdateProcessorFactory` has to be registered in `solrconfig.xml` a
 The `SignatureUpdateProcessorFactory` takes several properties:
 
 `signatureClass`::
-A Signature implementation for generating a signature hash. The default is `org.apache.solr.update.processor.Lookup3Signature`.
+A Signature implementation for generating a signature hash.
+The default is `org.apache.solr.update.processor.Lookup3Signature`.
 +
-The full classpath of the implementation must be specified. The available options are described above, the associated classpaths to use are:
+The full classpath of the implementation must be specified.
+The available options are described above; the associated classpaths to use are:
 
 * `org.apache.solr.update.processor.Lookup3Signature`
 * `org.apache.solr.update.processor.MD5Signature`
 * `org.apache.solr.update.process.TextProfileSignature`
 
 `fields`::
-The fields to use to generate the signature hash in a comma separated list. By default, all fields on the document will be used.
+The fields to use to generate the signature hash in a comma separated list.
+By default, all fields on the document will be used.
 
 `signatureField`::
-The name of the field used to hold the fingerprint/signature. The field should be defined in `schema.xml`. The default is `signatureField`.
+The name of the field used to hold the fingerprint/signature.
+The field should be defined in `schema.xml`.
+The default is `signatureField`.
 
 `enabled`::
-Set to *false* to disable de-duplication processing. The default is *true*.
+Set to *false* to disable de-duplication processing.
+The default is *true*.
 
 `overwriteDupes`::
-If *true*, the default, when a document exists that already matches this signature, it will be overwritten.  If you are using `overwriteDupes=true` the `signatureField` must be `indexed="true"` in your Schema.
+If *true*, the default, when a document exists that already matches this signature, it will be overwritten.
+If you are using `overwriteDupes=true` the `signatureField` must be `indexed="true"` in your Schema.
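+As a minimal sketch, these properties are set as child elements of the factory within an update request processor chain (the values below are illustrative only and should be adjusted to your schema):
+
+[source,xml]
+----
+<!-- illustrative values; adjust fields and signatureField for your schema -->
+<processor class="solr.processor.SignatureUpdateProcessorFactory">
+  <bool name="enabled">true</bool>
+  <str name="signatureField">signatureField</str>
+  <bool name="overwriteDupes">false</bool>
+  <str name="fields">name,features,cat</str>
+  <str name="signatureClass">org.apache.solr.update.processor.Lookup3Signature</str>
+</processor>
+----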
 
 .Using `SignatureUpdateProcessorFactory` in SolrCloud
 [WARNING]
@@ -84,7 +95,8 @@ If *true*, the default, when a document exists that already matches this signatu
 
 There are 2 important things to keep in mind when using `SignatureUpdateProcessorFactory` with SolrCloud:
 
-. The `overwriteDupes=true` setting does not work _except_ in the special case of using the uniqueKey field as the `signatureField`.  Attempting De-duplication on any other `signatureField` will not work correctly because of how updates are forwarded to replicas
+. The `overwriteDupes=true` setting does not work _except_ in the special case of using the uniqueKey field as the `signatureField`.
+Attempting de-duplication on any other `signatureField` will not work correctly because of how updates are forwarded to replicas.
 . When using the uniqueKey field as the `signatureField`, `SignatureUpdateProcessorFactory` must be run prior to the `<<update-request-processors.adoc#update-processors-in-solrcloud,DistributedUpdateProcessor>>` to ensure that documents can be routed to the correct shard leader based on the (generated) uniqueKey field.
 
 (Using any other `signatureField` with `overwriteDupes=false` -- to generate a Signature for each document without de-duplication -- has no limitations.)
diff --git a/solr/solr-ref-guide/src/defining-core-properties.adoc b/solr/solr-ref-guide/src/defining-core-properties.adoc
deleted file mode 100644
index 9e9a611..0000000
--- a/solr/solr-ref-guide/src/defining-core-properties.adoc
+++ /dev/null
@@ -1,95 +0,0 @@
-= Defining core.properties
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-Core discovery means that creating a core is as simple as a `core.properties` file located on disk.
-
-The `core.properties` file is a simple Java Properties file where each line is just a key=value pair, e.g., `name=core1`. Notice that no quotes are required.
-
-A minimal `core.properties` file looks like the example below. However, it can also be empty, see information on placement of `core.properties` below.
-
-[source,bash]
-----
-name=my_core_name
-----
-
-== Placement of core.properties
-
-Solr cores are configured by placing a file named `core.properties` in a sub-directory under `solr.home`. There are no a-priori limits to the depth of the tree, nor are there limits to the number of cores that can be defined. Cores may be anywhere in the tree with the exception that cores may _not_ be defined under an existing core. That is, the following is not allowed:
-
-[source,text]
-----
-./cores/core1/core.properties
-./cores/core1/coremore/core5/core.properties
-----
-
-In this example, the enumeration will stop at "core1".
-
-The following is legal:
-
-[source,text]
-----
-./cores/somecores/core1/core.properties
-./cores/somecores/core2/core.properties
-./cores/othercores/core3/core.properties
-./cores/extracores/deepertree/core4/core.properties
-----
-
-It is possible to segment Solr into multiple cores, each with its own configuration and indices. Cores may be dedicated to a single application or to very different ones, but all are administered through a common administration interface. You can create new Solr cores on the fly, shutdown cores, even replace one running core with another, all without ever stopping or restarting Solr.
-
-Your `core.properties` file can be empty if necessary. Suppose `core.properties` is located in `./cores/core1` (relative to `solr_home`) but is empty. In this case, the core name is assumed to be "core1". The instanceDir will be the folder containing `core.properties` (i.e., `./cores/core1`). The dataDir will be `../cores/core1/data`, etc.
-
-[NOTE]
-====
-You can run Solr without configuring any cores.
-====
-
-== Defining core.properties Files
-
-The minimal `core.properties` file is an empty file, in which case all of the properties are defaulted appropriately.
-
-Java properties files allow the hash (`#`) or bang (`!`) characters to specify comment-to-end-of-line.
-
-The following properties are available:
-
-`name`:: The name of the SolrCore. You'll use this name to reference the SolrCore when running commands with the CoreAdminHandler.
-
-`config`:: The configuration file name for a given core. The default is `solrconfig.xml`.
-
-`schema`:: The schema file name for a given core. The default is `schema.xml` but please note that if you are using a "managed schema" (the default behavior) then any value for this property which does not match the effective `managedSchemaResourceName` will be read once, backed up, and converted for managed schema use. See <<schema-factory-definition-in-solrconfig.adoc#,Schema Factory Definition in SolrConfig>> for more details.
-
-`dataDir`:: The core's data directory (where indexes are stored) as either an absolute pathname, or a path relative to the value of `instanceDir`. This is `data` by default.
-
-`configSet`:: The name of a defined configset, if desired, to use to configure the core (see the section  <<config-sets.adoc#,Configsets>> for more details).
-
-`properties`:: The name of the properties file for this core. The value can be an absolute pathname or a path relative to the value of `instanceDir`.
-
-`transient`:: If *true*, the core can be unloaded if Solr reaches the `transientCacheSize`. The default if not specified is *false*. Cores are unloaded in order of least recently used first. _Setting this to *true* is not recommended in SolrCloud mode._
-
-`loadOnStartup`:: If *true*, the default if it is not specified, the core will loaded when Solr starts. _Setting this to *false* is not recommended in SolrCloud mode._
-
-`coreNodeName`:: Used only in SolrCloud, this is a unique identifier for the node hosting this replica. By default a `coreNodeName` is generated automatically, but setting this attribute explicitly allows you to manually assign a new core to replace an existing replica. For example, this can be useful when replacing a machine that has had a hardware failure by restoring from backups on a new machine with a new hostname or port.
-
-`ulogDir`:: The absolute or relative directory for the update log for this core (SolrCloud).
-
-`shard`:: The shard to assign this core to (SolrCloud).
-
-`collection`:: The name of the collection this core is part of (SolrCloud).
-
-`roles`:: Future parameter for SolrCloud or a way for users to mark nodes for their own use.
-
-Additional user-defined properties may be specified for use as variables. For more information on how to define local properties, see the section <<configuring-solrconfig-xml.adoc#substituting-properties-in-solr-config-files,Substituting Properties in Solr Config Files>>.
diff --git a/solr/solr-ref-guide/src/deployment-and-operations.adoc b/solr/solr-ref-guide/src/deployment-and-operations.adoc
deleted file mode 100644
index df96c05..0000000
--- a/solr/solr-ref-guide/src/deployment-and-operations.adoc
+++ /dev/null
@@ -1,40 +0,0 @@
-= Deployment and Operations
-:page-children: solr-control-script-reference, solr-configuration-files, taking-solr-to-production, making-and-restoring-backups, running-solr-in-docker, running-solr-on-hdfs, aws-solrcloud-tutorial, upgrading-a-solr-cluster, solr-upgrade-notes
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-An important aspect of Solr is that all operations and deployment can be done online, with minimal or no impact to running applications. This includes minor upgrades and provisioning and removing nodes, backing up and restoring indexes and editing configurations
-
-Common administrative tasks include:
-
-<<solr-control-script-reference.adoc#,Solr Control Script Reference>>: This section provides information about all of the options available to the `bin/solr` / `bin\solr.cmd` scripts, which can start and stop Solr, configure authentication, and create or remove collections and cores.
-
-<<solr-configuration-files.adoc#,Solr Configuration Files>>: Overview of the installation layout and major configuration files.
-
-<<taking-solr-to-production.adoc#,Taking Solr to Production>>: Detailed steps to help you install Solr as a service and take your application to production.
-
-<<making-and-restoring-backups.adoc#,Making and Restoring Backups>>: Describes backup strategies for your Solr indexes.
-
-<<running-solr-in-docker.adoc#,Running Solr in Docker>>: How to use the official Solr Docker image.
-
-<<running-solr-on-hdfs.adoc#,Running Solr on HDFS>>: How to use HDFS to store your Solr indexes and transaction logs.
-
-<<aws-solrcloud-tutorial.adoc#,SolrCloud on AWS EC2>>: A tutorial on deploying Solr in Amazon Web Services (AWS) using EC2 instances.
-
-<<upgrading-a-solr-cluster.adoc#,Upgrading a Solr Cluster>>: Information for upgrading a production SolrCloud cluster.
-
-<<solr-upgrade-notes.adoc#,Solr Upgrade Notes>>: Information about changes made in Solr releases.
diff --git a/solr/solr-ref-guide/src/deployment-guide.adoc b/solr/solr-ref-guide/src/deployment-guide.adoc
new file mode 100644
index 0000000..3fcfcf8
--- /dev/null
+++ b/solr/solr-ref-guide/src/deployment-guide.adoc
@@ -0,0 +1,100 @@
+= Deployment Guide
+:page-children: solr-control-script-reference, \
+    installation-deployment, \
+    scaling-solr, \
+    monitoring-solr, \
+    securing-solr, \
+    client-apis
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+[.lead]
+The Deployment Guide covers installation, upgrades, deployments, monitoring, and client integrations.
+
+****
+[discrete]
+=== Solr CLI
+
+<<solr-control-script-reference.adoc#,*Solr Control Script Reference*>>: The options available to the `bin/solr` or `bin\solr.cmd` scripts.
+****
+
+****
+[discrete]
+=== Installation & Deployment
+
+<<installation-deployment.adoc#,*Installation and Deployment*>>
+
+// This pulls the sub-section list from the child page to reduce errors
+include::installation-deployment.adoc[tag=install-sections]
+****
+
+****
+[discrete]
+=== Scaling Solr
+
+<<scaling-solr.adoc#,*Scaling Solr*>>
+
+// This pulls the sub-section list from the child page to reduce errors
+include::scaling-solr.adoc[tag=scaling-sections]
+****
+
+****
+[discrete]
+=== Monitoring Solr
+
+<<monitoring-solr.adoc#,*Monitoring Solr*>>
+
+// This pulls the sub-section list from the child page to reduce errors
+include::monitoring-solr.adoc[tag=monitoring-sections]
+****
+
+****
+[discrete]
+=== Securing Solr
+
+<<securing-solr.adoc#,*Securing Solr*>>
+
+Authentication Plugins:
+
+// This pulls the sub-section list from the child page to reduce errors
+include::securing-solr.adoc[tag=list-of-authentication-plugins]
+
+Authorization Plugins:
+
+// This pulls the sub-section list from the child page to reduce errors
+include::securing-solr.adoc[tag=list-of-authorization-plugins]
+
+Audit Logging and SSL:
+
+[width=100%,cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<audit-logging.adoc#,Audit Logging>>
+| <<enabling-ssl.adoc#,Enabling SSL>>
+| <<zookeeper-access-control.adoc#,ZooKeeper Access Control>>
+|
+|===
+****
+
+****
+[discrete]
+=== Client APIs
+
+<<client-apis.adoc#,*Client APIs*>>: Access Solr through various client APIs, including JavaScript, JSON, and Ruby.
+
+// This pulls the sub-section list from the child page to reduce errors
+include::client-apis.adoc[tag=client-sections]
+****
diff --git a/solr/solr-ref-guide/src/the-dismax-query-parser.adoc b/solr/solr-ref-guide/src/dismax-query-parser.adoc
similarity index 93%
rename from solr/solr-ref-guide/src/the-dismax-query-parser.adoc
rename to solr/solr-ref-guide/src/dismax-query-parser.adoc
index ce20bfc..46c0c89 100644
--- a/solr/solr-ref-guide/src/the-dismax-query-parser.adoc
+++ b/solr/solr-ref-guide/src/dismax-query-parser.adoc
@@ -1,4 +1,4 @@
-= The DisMax Query Parser
+= DisMax Query Parser
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -60,13 +60,14 @@ assigns `fieldOne` a boost of 2.3, leaves `fieldTwo` with the default boost (bec
 
 === mm (Minimum Should Match) Parameter
 
-When processing queries, Lucene/Solr recognizes three types of clauses: mandatory, prohibited, and "optional" (also known as "should" clauses). By default, all words or phrases specified in the `q` parameter are treated as "optional" clauses unless they are preceded by a "+" or a "-". When dealing with these "optional" clauses, the `mm` parameter makes it possible to say that a certain minimum number of those clauses must match. The DisMax query parser offers great flexibility in how the [...]
+When processing queries, Lucene/Solr recognizes three types of clauses: mandatory, prohibited, and "optional" (also known as "should" clauses).
+By default, all words or phrases specified in the `q` parameter are treated as "optional" clauses unless they are preceded by a "+" or a "-".
+When dealing with these "optional" clauses, the `mm` parameter makes it possible to say that a certain minimum number of those clauses must match.
+The DisMax query parser offers great flexibility in how the minimum number can be specified.
 
 The table below explains the various ways that mm values can be specified.
 
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
-
-[cols="30,10,60",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Syntax |Example |Description
 |Positive integer |3 |Defines the minimum number of clauses that must match, regardless of how many clauses there are in total.
@@ -150,13 +151,13 @@ Generally speaking, using `bq` (or `bf`, below) is considered a poor way to "boo
 
 "Multiplicative Boosting" is generally considered to be a more predictable method of influencing document score, because it acts as a "scaling factor" -- increasing (or decreasing) the scores of each document by a _relative_ amount.
 
-The <<other-parsers.adoc#boost-query-parser,`{!boost}` QParser>> provides a convenient wrapper for implementing multiplicative boosting, and the <<the-extended-dismax-query-parser.adoc#extended-dismax-parameters,`{!edismax}` QParser>> offers a `boost` query parameter shortcut for using it.
+The <<other-parsers.adoc#boost-query-parser,`{!boost}` QParser>> provides a convenient wrapper for implementing multiplicative boosting, and the <<edismax-query-parser.adoc#extended-dismax-parameters,`{!edismax}` QParser>> offers a `boost` query parameter shortcut for using it.
 ====
 
 
 === bf (Boost Functions) Parameter
 
-The `bf` parameter specifies functions (with optional <<the-standard-query-parser.adoc#boosting-a-term-with,query boost>>) that will be used to construct FunctionQueries which will be _added_ to the user's main query as optional clauses that will influence the score. Any <<function-queries.adoc#available-functions,function supported natively by Solr>> can be used, along with a boost value. For example:
+The `bf` parameter specifies functions (with optional <<standard-query-parser.adoc#boosting-a-term-with,query boost>>) that will be used to construct FunctionQueries which will be _added_ to the user's main query as optional clauses that will influence the score.
+Any <<function-queries.adoc#available-functions,function supported natively by Solr>> can be used, along with a boost value.
+For example:
 
 [source,text]
 ----
diff --git a/solr/solr-ref-guide/src/distributed-requests.adoc b/solr/solr-ref-guide/src/distributed-requests.adoc
deleted file mode 100644
index 894bf19..0000000
--- a/solr/solr-ref-guide/src/distributed-requests.adoc
+++ /dev/null
@@ -1,216 +0,0 @@
-= Distributed Requests
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-When a Solr node receives a search request, the request is routed behind the scenes to a replica of a shard that is part of the collection being searched.
-
-The chosen replica acts as an aggregator: it creates internal requests to randomly chosen replicas of every shard in the collection, coordinates the responses, issues any subsequent internal requests as needed (for example, to refine facets values, or request additional stored fields), and constructs the final response for the client.
-
-== Limiting Which Shards are Queried
-
-While one of the advantages of using SolrCloud is the ability to query very large collections distributed across various shards, in some cases you may have configured Solr so you know <<shards-and-indexing-data-in-solrcloud.adoc#document-routing,you are only interested in results from a specific subset of shards>>. You have the option of searching over all of your data or just parts of it.
-
-A query across all shards for a collection is simply a query that does not define a `shards` parameter:
-
-[source,text]
-----
-http://localhost:8983/solr/gettingstarted/select?q=*:*
-----
-
-If you want to search just one shard, use the `shards` parameter to specify the shard by its logical ID, as in:
-
-[source,text]
-----
-http://localhost:8983/solr/gettingstarted/select?q=*:*&shards=shard1
-----
-
-If you want to search a group of shards, you can specify each shard separated by a comma in one request:
-
-[source,text]
-----
-http://localhost:8983/solr/gettingstarted/select?q=*:*&shards=shard1,shard2
-----
-
-In both of the above examples, while only the specific shards are queried, any random replica of the shard will get the request.
-
-Alternatively, you can specify a list of replicas you wish to use in place of a shard IDs by separating the replica IDs with commas:
-
-[source,text]
-----
-http://localhost:8983/solr/gettingstarted/select?q=*:*&shards=localhost:7574/solr/gettingstarted,localhost:8983/solr/gettingstarted
-----
-
-Or you can specify a list of replicas to choose from for a single shard (for load balancing purposes) by using the pipe symbol (|) between different replica IDs:
-
-[source,text]
-----
-http://localhost:8983/solr/gettingstarted/select?q=*:*&shards=localhost:7574/solr/gettingstarted|localhost:7500/solr/gettingstarted
-----
-
-Finally, you can specify a list of shards (separated by commas) each defined by a list of replicas (seperated by pipes).
-
-In the following example, 2 shards are queried, the first being a random replica from shard1, the second being a random replica from the explicit pipe delimited list:
-
-[source,text]
-----
-http://localhost:8983/solr/gettingstarted/select?q=*:*&shards=shard1,localhost:7574/solr/gettingstarted|localhost:7500/solr/gettingstarted
-----
-
-== Configuring the ShardHandlerFactory
-
-For finer-grained control, you can directly configure and tune aspects of the concurrency and thread-pooling used within distributed search in Solr. The default configuration favors throughput over latency.
-
-This is done by defining a `shardHandlerFactory` in the configuration for your search handler.
-
-To add a `shardHandlerFactory` to the standard search handler, provide a configuration in `solrconfig.xml`, as in this example:
-
-[source,xml]
-----
-<requestHandler name="/select" class="solr.SearchHandler">
-  <!-- other params go here -->
-  <shardHandlerFactory class="HttpShardHandlerFactory">
-    <int name="socketTimeout">1000</int>
-    <int name="connTimeout">5000</int>
-  </shardHandlerFactory>
-</requestHandler>
-----
-
-`HttpShardHandlerFactory` is the only `ShardHandlerFactory` implementation included out of the box with Solr, It accepts the following parameters:
-
-`socketTimeout`::
-The amount of time in ms that a socket is allowed to wait. The default is `0`, where the operating system's default will be used.
-
-`connTimeout`::
-The amount of time in ms that is accepted for binding / connecting a socket. The default is `0`, where the operating system's default will be used.
-
-`maxConnectionsPerHost`::
-The maximum number of concurrent connections that is made to each individual shard in a distributed search. The default is `100000`.
-
-`corePoolSize`::
-The retained lowest limit on the number of threads used in coordinating distributed search. The default is `0`.
-
-`maximumPoolSize`::
-The maximum number of threads used for coordinating distributed search. The default is `Integer.MAX_VALUE`.
-
-`maxThreadIdleTime`::
-The amount of time in seconds to wait for before threads are scaled back in response to a reduction in load. The default is `5`.
-
-`sizeOfQueue`::
-If specified, the thread pool will use a backing queue instead of a direct handoff buffer. High throughput systems will want to configure this to be a direct hand off (with `-1`). Systems that desire better latency will want to configure a reasonable size of queue to handle variations in requests. The default is `-1`.
-
-`fairnessPolicy`::
-Chooses the JVM specifics dealing with fair policy queuing, if enabled distributed searches will be handled in a First in First out fashion at a cost to throughput. If disabled throughput will be favored over latency. The default is `false`.
-
-In addition, `HttpShardHandlerFactory` also depends on the following top-level property:
-
-`allowUrls`::
-See <<format-of-solr-xml.adoc#_allow_urls, Format of solr.allowUrls>>
-
-[[distributedidf]]
-== Configuring statsCache (Distributed IDF)
-
-Document and term statistics are needed in order to calculate relevancy. Solr provides four implementations out of the box when it comes to document stats calculation:
-
-* `LocalStatsCache`: This only uses local term and document statistics to compute relevance. In cases with uniform term distribution across shards, this works reasonably well. This option is the default if no `<statsCache>` is configured.
-* `ExactStatsCache`: This implementation uses global values (across the collection) for document frequency.
-* `ExactSharedStatsCache`: This is exactly like the exact stats cache in its functionality but the global stats are reused for subsequent requests with the same terms.
-* `LRUStatsCache`: This implementation uses an LRU cache to hold global stats, which are shared between requests.
-
-The implementation can be selected by setting `<statsCache>` in `solrconfig.xml`. For example, the following line makes Solr use the `ExactStatsCache` implementation:
-
-[source,xml]
-----
-<statsCache class="org.apache.solr.search.stats.ExactStatsCache"/>
-----
-
-== Avoiding Distributed Deadlock
-
-Each shard serves top-level query requests and then makes sub-requests to all of the other shards. Care should be taken to ensure that the max number of threads serving HTTP requests is greater than the possible number of requests from both top-level clients and other shards. If this is not the case, the configuration may result in a distributed deadlock.
-
-For example, a deadlock might occur in the case of two shards, each with just a single thread to service HTTP requests. Both threads could receive a top-level request concurrently, and make sub-requests to each other. Because there are no more remaining threads to service requests, the incoming requests will be blocked until the other pending requests are finished, but they will not finish since they are waiting for the sub-requests. By ensuring that Solr is configured to handle a suffic [...]
-
-== shards.preference Parameter
-
-Solr allows you to pass an optional string parameter named `shards.preference` to indicate that a distributed query should sort the available replicas in the given order of precedence within each shard.
-
-The syntax is: `shards.preference=_property_:__value__`. The order of the properties and the values are significant: the first one is the primary sort, the second is secondary, etc.
-
-IMPORTANT: `shards.preference` is supported for single shard scenarios when using the SolrJ clients.
-
-The properties that can be specified are as follows:
-
-`replica.type`::
-One or more replica types that are preferred. Any combination of `PULL`, `TLOG` and `NRT` is allowed.
-
-`replica.location`::
-One or more replica locations that are preferred.
-+
-A location starts with `http://hostname:port`. Matching is done for the given string as a prefix, so it's possible to e.g., leave out the port.
-+
-A special value `local` may be used to denote any local replica running on the same Solr instance as the one handling the query. This is useful when a query requests many fields or large fields to be returned per document because it avoids moving large amounts of data over the network when it is available locally. In addition, this feature can be useful for minimizing the impact of a problematic replica with degraded performance, as it reduces the likelihood that the degraded replica wil [...]
-+
-The value of `replica.location:local` diminishes as the number of shards (that have no locally-available replicas) in a collection increases because the query controller will have to direct the query to non-local replicas for most of the shards.
-+
-In other words, this feature is mostly useful for optimizing queries directed towards collections with a small number of shards and many replicas.
-+
-Also, this option should only be used if you are load balancing requests across all nodes that host replicas for the collection you are querying, as Solr's `CloudSolrClient` will do. If not load-balancing, this feature can introduce a hotspot in the cluster since queries won't be evenly distributed across the cluster.
-
-`replica.base`::
-Applied after sorting by inherent replica attributes, this property defines a fallback ordering among sets of preference-equivalent replicas; if specified, only one value may be specified for this property, and it must be specified last.
-+
-`random`, the default, randomly shuffles replicas for each request. This distributes requests evenly, but can result in sub-optimal cache usage for shards with replication factor > 1.
-+
-`stable:dividend:_paramName_` parses an integer from the value associated with the given parameter name; this integer is used as the dividend (mod equivalent replica count) to determine (via list rotation) order of preference among equivalent replicas.
-+
-`stable[:hash[:_paramName_]]` the string value associated with the given parameter name is hashed to a dividend that is used to determine replica preference order (analogous to the explicit `dividend` property above); `_paramName_` defaults to `q` if not specified, providing stable routing keyed to the string value of the "main query". Note that this may be inappropriate for some use cases (e.g., static main queries that leverage parameter substitution)
-
-`node.sysprop`::
-Query will be routed to nodes with same defined system properties as the current one. For example, if you start Solr nodes on different racks, you'll want to identify those nodes by a <<configuring-solrconfig-xml.adoc#jvm-system-properties,system property>> (e.g., `-Drack=rack1`). Then, queries can contain `shards.preference=node.sysprop:sysprop.rack`, to make sure you always hit shards with the same value of `rack`.
-
-`replica.leader`::
-Prefer replicas based on their leader status, set to either `true` or `false`. Consider a shard with two `TLOG` replicas and four `PULL` replicas (six replicas in total, one of which is the leader).
-With `shards.preference=replica.leader:false`, 5 out of 6 replicas will be preferred. Contrast this with `shards.preference=replica.type:PULL` where only 4 of 6 replicas will be preferred.
-Note that the non-leader `TLOG` replica behaves like a `PULL` replica from a search perspective; it pulls index updates from the leader just like a `PULL` replica and does not perform soft-commits.
-The difference is that the non-leader `TLOG` replica also captures updates in its TLOG, so that it is a candidate to replace the current leader if it is lost.
-
-Examples:
-
-* Prefer stable routing (keyed to client "sessionId" parameter) among otherwise equivalent replicas:
-   `shards.preference=replica.base:stable:hash:sessionId&sessionId=abc123`
-
-* Prefer PULL replicas:
-   `shards.preference=replica.type:PULL`
-
-* Prefer PULL replicas, or TLOG replicas if PULL replicas not available:
-   `shards.preference=replica.type:PULL,replica.type:TLOG`
-
-* Prefer any local replicas:
-   `shards.preference=replica.location:local`
-
-* Prefer any replicas on a host called "server1" with "server2" as the secondary option:
-   `shards.preference=replica.location:http://server1,replica.location:http://server2`
-
-* Prefer PULL replicas if available, otherwise TLOG replicas, and local ones among those:
-   `shards.preference=replica.type:PULL,replica.type:TLOG,replica.location:local`
-
-* Prefer local replicas, and among them PULL replicas when available TLOG otherwise:
-   `shards.preference=replica.location:local,replica.type:PULL,replica.type:TLOG`
-
-* Prefer any replica that is not a leader:
-    `shards.preference=replica.leader:false`
-
-Note that if you provide the settings in a query string, they need to be properly URL-encoded.
diff --git a/solr/solr-ref-guide/src/distributed-search-with-index-sharding.adoc b/solr/solr-ref-guide/src/distributed-search-with-index-sharding.adoc
deleted file mode 100644
index 7e7ebe3..0000000
--- a/solr/solr-ref-guide/src/distributed-search-with-index-sharding.adoc
+++ /dev/null
@@ -1,178 +0,0 @@
-= Distributed Search with Index Sharding
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-When using traditional index sharding, you will need to consider how to query your documents.
-
-It is highly recommended that you use <<solrcloud.adoc#,SolrCloud>> when needing to scale up or scale out. The setup described below is legacy and was used prior to the existence of SolrCloud. SolrCloud provides for a truly distributed set of features with support for things like automatic routing, leader election, optimistic concurrency and other sanity checks that are expected out of a distributed system.
-
-Everything on this page is specific to legacy setup of distributed search. Users trying out SolrCloud should not follow any of the steps or information below.
-
-Update reorders (i.e., replica A may see update X then Y, and replica B may see update Y then X). *deleteByQuery* also handles reorders the same way, to ensure replicas are consistent. All replicas of a shard are consistent, even if the updates arrive in a different order on different replicas.
-
-== Distributing Documents across Shards
-
-When not using SolrCloud, it is up to you to get all your documents indexed on each shard of your server farm. Solr supports distributed indexing (routing) in its true form only in the SolrCloud mode.
-
-In the legacy distributed mode, Solr does not calculate universal term/doc frequencies. For most large-scale implementations, it is not likely to matter that Solr calculates TF/IDF at the shard level. However, if your collection is heavily skewed in its distribution across servers, you may find misleading relevancy results in your searches. In general, it is probably best to randomly distribute documents to your shards.
-
-== Executing Distributed Searches with the shards Parameter
-
-If a query request includes the `shards` parameter, the Solr server distributes the request across all the shards listed as arguments to the parameter. The `shards` parameter uses this syntax:
-
-`host:port/base_url,host:port/base_url*`
-
-For example, the `shards` parameter below causes the search to be distributed across two Solr servers: *solr1* and **solr2**, both of which are running on port 8983:
-
-`\http://localhost:8983/solr/core1/select?shards=solr1:8983/solr/core1,solr2:8983/solr/core1&indent=true&q=ipod+solr`
-
-Rather than require users to include the shards parameter explicitly, it is usually preferred to configure this parameter as a default in the RequestHandler section of `solrconfig.xml`.
-
-[IMPORTANT]
-====
-Do not add the `shards` parameter to the standard request handler; doing so may cause search queries may enter an infinite loop. Instead, define a new request handler that uses the `shards` parameter, and pass distributed search requests to that handler.
-====
-
-With Legacy mode, only query requests are distributed. This includes requests to the SearchHandler (or any handler extending from `org.apache.solr.handler.component.SearchHandler`) using standard components that support distributed search.
-
-As in SolrCloud mode, when `shards.info=true`, distributed responses will include information about the shard (where each shard represents a logically different index or physical location)
-
-The following components support distributed search:
-
-* The *Query* component, which returns documents matching a query
-* The *Facet* component, which processes facet.query and facet.field requests where facets are sorted by count (the default).
-* The *Highlighting* component, which enables Solr to include "highlighted" matches in field values.
-* The *Stats* component, which returns simple statistics for numeric fields within the DocSet.
-* The *Debug* component, which helps with debugging.
-
-=== URL Allow List
-
-The nodes allowed in the `shards` parameter is configurable through the `allowUrls` property in `solr.xml`. This allow-list is automatically configured for SolrCloud but needs explicit configuration for leader/follower mode. Read more details in the section <<distributed-requests.adoc#configuring-the-shardhandlerfactory,Configuring the ShardHandlerFactory>>.
-
-== Limitations to Distributed Search
-
-Distributed searching in Solr has the following limitations:
-
-* Each document indexed must have a unique key.
-* If Solr discovers duplicate document IDs, Solr selects the first document and discards subsequent documents.
-* The index for distributed searching may become momentarily out of sync if a commit happens between the first and second phase of the distributed search. This might cause a situation where a document that once matched a query and was subsequently changed may no longer match the query but will still be retrieved. This situation is expected to be quite rare, however, and is only possible for a single query request.
-* The number of shards is limited by number of characters allowed for GET method's URI; most Web servers generally support at least 4000 characters, but many servers limit URI length to reduce their vulnerability to Denial of Service (DoS) attacks.
-* Shard information can be returned with each document in a distributed search by including `fl=id, [shard]` in the search request. This returns the shard URL.
-* In a distributed search, the data directory from the core descriptor overrides any data directory in `solrconfig.xml.`
-* Update commands may be sent to any server with distributed indexing configured correctly. Document adds and deletes are forwarded to the appropriate server/shard based on a hash of the unique document id. *commit* commands and *deleteByQuery* commands are sent to every server in `shards`.
-
-Formerly a limitation was that TF/IDF relevancy computations only used shard-local statistics. This is still the case by default. If your data isn't randomly distributed, or if you want more exact statistics, then remember to configure the ExactStatsCache.
-
-== Avoiding Distributed Deadlock with Distributed Search
-
-Like in SolrCloud mode, inter-shard requests could lead to a distributed deadlock. It can be avoided by following the instructions in the section  <<distributed-requests.adoc#,Distributed Requests>>.
-
-== Testing Index Sharding on Two Local Servers
-
-For simple functional testing, it's easiest to just set up two local Solr servers on different ports. (In a production environment, of course, these servers would be deployed on separate machines.)
-
-.  Make two Solr home directories and copy `solr.xml` into the new directories:
-+
-[source,bash]
-----
-mkdir example/nodes
-mkdir example/nodes/node1
-# Copy solr.xml into this solr.home
-cp server/solr/solr.xml example/nodes/node1/.
-# Repeat the above steps for the second node
-mkdir example/nodes/node2
-cp server/solr/solr.xml example/nodes/node2/.
-----
-.  Start the two Solr instances
-+
-[source,bash]
-----
-# Start first node on port 8983
-bin/solr start -s example/nodes/node1 -p 8983
-
-# Start second node on port 8984
-bin/solr start -s example/nodes/node2 -p 8984
-----
-.  Create a core on both the nodes with the sample_techproducts_configs.
-+
-[source,bash]
-----
-bin/solr create_core -c core1 -p 8983 -d sample_techproducts_configs
-# Create a core on the Solr node running on port 8984
-bin/solr create_core -c core1 -p 8984 -d sample_techproducts_configs
-----
-.  In a third window, index an example document to each of the server:
-+
-[source,bash]
-----
-bin/post -c core1 example/exampledocs/monitor.xml -port 8983
-
-bin/post -c core1 example/exampledocs/monitor2.xml -port 8984
-----
-.  Search on the node on port 8983:
-+
-[source,bash]
-----
-curl http://localhost:8983/solr/core1/select?q=*:*&wt=xml&indent=true
-----
-+
-This should bring back one document.
-+
-Search on the node on port 8984:
-+
-[source,bash]
-----
-curl http://localhost:8984/solr/core1/select?q=*:*&wt=xml&indent=true
-----
-+
-This should also bring back a single document.
-+
-Now do a distributed search across both servers with your browser or `curl.` In the example below, an extra parameter 'fl' is passed to restrict the returned fields to id and name.
-+
-[source,bash]
-----
-curl http://localhost:8983/solr/core1/select?q=*:*&indent=true&shards=localhost:8983/solr/core1,localhost:8984/solr/core1&fl=id,name&wt=xml
-----
-+
-This should contain both the documents as shown below:
-+
-[source,xml]
-----
-<response>
-  <lst name="responseHeader">
-    <int name="status">0</int>
-    <int name="QTime">8</int>
-    <lst name="params">
-      <str name="q">*:*</str>
-      <str name="shards">localhost:8983/solr/core1,localhost:8984/solr/core1</str>
-      <str name="indent">true</str>
-      <str name="fl">id,name</str>
-      <str name="wt">xml</str>
-    </lst>
-  </lst>
-  <result name="response" numFound="2" start="0" maxScore="1.0">
-    <doc>
-      <str name="id">3007WFP</str>
-      <str name="name">Dell Widescreen UltraSharp 3007WFP</str>
-    </doc>
-    <doc>
-      <str name="id">VA902B</str>
-      <str name="name">ViewSonic VA902B - flat panel display - TFT - 19"</str>
-    </doc>
-  </result>
-</response>
-----
diff --git a/solr/solr-ref-guide/src/solr-tracing.adoc b/solr/solr-ref-guide/src/distributed-tracing.adoc
similarity index 92%
rename from solr/solr-ref-guide/src/solr-tracing.adoc
rename to solr/solr-ref-guide/src/distributed-tracing.adoc
index 6a87bcd..e6f19b3 100644
--- a/solr/solr-ref-guide/src/solr-tracing.adoc
+++ b/solr/solr-ref-guide/src/distributed-tracing.adoc
@@ -1,4 +1,4 @@
-= Distributed Solr Tracing
+= Distributed Tracing
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -24,7 +24,7 @@ At the moment, only Jaeger is supported out of the box.
 A sampled distributed tracing query request on Jaeger looks like this:
 
 .Tracing of a Solr query
-image::images/solr-tracing/query-request-tracing.png[]
+image::images/distributed-tracing/query-request-tracing.png[]
 
 == Configuring Tracer
 
@@ -61,7 +61,8 @@ Then Jaeger tracer is configured in `solr.xml` like this:
 <tracerConfig name="tracerConfig" class="org.apache.solr.jaeger.JaegerTracerConfigurator" />
 ----
 
-_There are no configuration elements in the XML_; instead, this 3rd party system is configured using System Properties or Environment Variables.  The full list are listed at https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md[Jaeger-README].
+_There are no configuration elements in the XML_; instead, this 3rd party system is configured using System Properties or Environment Variables.
+The full list is available at https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md[Jaeger-README].
 
 For example, to use the probabilistic sampler, you could set this environment variable:
 
@@ -75,4 +76,4 @@ or System property:
 [source,bash]
 ----
 bin/solr start -DJAEGER_SAMPLER_TYPE=probabilistic
-----
\ No newline at end of file
+----
diff --git a/solr/solr-ref-guide/src/docker-faq.adoc b/solr/solr-ref-guide/src/docker-faq.adoc
index 5c4be6f..df90235 100644
--- a/solr/solr-ref-guide/src/docker-faq.adoc
+++ b/solr/solr-ref-guide/src/docker-faq.adoc
@@ -225,7 +225,7 @@ Then go to `+http://localhost:8983/solr/#/~cloud+` (adjust the hostname for your
 
 == How can I run ZooKeeper and Solr with Docker Compose?
 
-See the <<running-solr-in-docker.adoc#docker-compose,docker compose example>>.
+See the <<solr-in-docker.adoc#docker-compose,docker compose example>>.
 
 == How can I get rid of "shared memory" warnings on Solr startup?
 
diff --git a/solr/solr-ref-guide/src/document-analysis.adoc b/solr/solr-ref-guide/src/document-analysis.adoc
new file mode 100644
index 0000000..2f60f12
--- /dev/null
+++ b/solr/solr-ref-guide/src/document-analysis.adoc
@@ -0,0 +1,61 @@
+= Document Analysis in Solr
+:page-children: analyzers, \
+    tokenizers, \
+    filters, \
+    charfilterfactories, \
+    language-analysis, \
+    phonetic-matching, \
+    analysis-screen
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+The following sections describe how Solr breaks down and works with textual data.
+There are three main concepts to understand: analyzers, tokenizers, and filters.
+
+* <<analyzers.adoc#,Field analyzers>> are used both during ingestion, when a document is indexed, and at query time. An analyzer examines the text of fields and generates a token stream. Analyzers may be a single class or they may be composed of a series of tokenizer and filter classes.
+* <<tokenizers.adoc#,Tokenizers>> break field data into lexical units, or _tokens_.
+* <<filters.adoc#,Filters>> examine a stream of tokens and keep them, transform or discard them, or create new ones. Tokenizers and filters may be combined to form pipelines, or _chains_, where the output of one is input to the next. Such a sequence of tokenizers and filters is called an _analyzer_ and the resulting output of an analyzer is used to match query results or build indices.
+
+== Using Analyzers, Tokenizers, and Filters
+
+Although the analysis process is used for both indexing and querying, it is not required to use the same analysis process for both operations.
+For indexing, you often want to simplify, or normalize, words.
+For example, setting all letters to lowercase, eliminating punctuation and accents, mapping words to their stems, and so on.
+Doing so can increase recall because, for example, "ram", "Ram" and "RAM" would all match a query for "ram".
+To increase query-time precision, a filter could narrow the matches by, for example, ignoring all-cap acronyms if you're interested in male sheep, but not Random Access Memory.
+
+The tokens output by the analysis process define the values, or _terms_, of that field and are used either to build an index of those terms when a new document is added, or to identify which documents contain the terms you are querying for.
+
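+As a small illustrative sketch of such a chain (the type name and the `stopwords.txt`/`synonyms.txt` resources are placeholders, not part of any particular schema), a tokenizer and a few filters might be combined like this:
+
+[source,xml]
+----
+<fieldType name="text_example" class="solr.TextField" positionIncrementGap="100">
+  <analyzer type="index">
+    <!-- split the text into tokens on standard word boundaries -->
+    <tokenizer class="solr.StandardTokenizerFactory"/>
+    <!-- drop common words, then normalize case -->
+    <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
+    <filter class="solr.LowerCaseFilterFactory"/>
+  </analyzer>
+  <analyzer type="query">
+    <tokenizer class="solr.StandardTokenizerFactory"/>
+    <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
+    <!-- expand synonyms only at query time -->
+    <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+    <filter class="solr.LowerCaseFilterFactory"/>
+  </analyzer>
+</fieldType>
+----
+
+Here the index-time and query-time chains differ only in query-time synonym expansion, which is a common reason to define the two analyzers separately.
+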
+=== For More Information
+
+These sections will show you how to configure field analyzers and also serve as a reference for the details of configuring each of the available tokenizer and filter classes.
+They also serve as a guide so that you can configure your own analysis classes if you have special needs that cannot be met with the included filters or tokenizers.
+
+****
+// This tags the below list so it can be used in the parent page section list
+// tag::analysis-sections[]
+[cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<analyzers.adoc#,Analyzers>>: Overview of Solr analyzers.
+| <<tokenizers.adoc#,Tokenizers>>: Tokenizers and tokenizer factory classes.
+| <<filters.adoc#,Filters>>: Filters and filter factory classes.
+| <<charfilterfactories.adoc#,CharFilterFactories>>: Filters for pre-processing input characters.
+| <<language-analysis.adoc#,Language Analysis>>: Tokenizers and filters for character set conversion and specific languages.
+| <<phonetic-matching.adoc#,Phonetic Matching>>: Filters for phonetic matching algorithms.
+| <<analysis-screen.adoc#,Analysis Screen>>: Admin UI for testing field analysis.
+|===
+// end::analysis-sections[]
+****
diff --git a/solr/solr-ref-guide/src/transforming-result-documents.adoc b/solr/solr-ref-guide/src/document-transformers.adoc
similarity index 95%
rename from solr/solr-ref-guide/src/transforming-result-documents.adoc
rename to solr/solr-ref-guide/src/document-transformers.adoc
index 236864f..689fdf7 100644
--- a/solr/solr-ref-guide/src/transforming-result-documents.adoc
+++ b/solr/solr-ref-guide/src/document-transformers.adoc
@@ -1,4 +1,4 @@
-= Transforming Result Documents
+= Document Transformers
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -27,7 +27,7 @@ When executing a request, a document transformer can be used by including it in
 fl=id,name,score,[shard]
 ----
 
-Some transformers allow, or require, local parameters which can be specified as key value pairs inside the brackets:
+Some transformers allow, or require, local params which can be specified as key value pairs inside the brackets:
 
 [source,plain]
 ----
@@ -86,7 +86,7 @@ In addition to using these request parameters, you can configure additional name
 </transformer>
 ----
 
-The `value` option forces an explicit value to always be used, while the `defaultValue` option provides a default that can still be overridden using the `v` and `t` local parameters.
+The `value` option forces an explicit value to always be used, while the `defaultValue` option provides a default that can still be overridden using the `v` and `t` local params.
 
 
 === [explain] - ExplainAugmenterFactory
@@ -127,7 +127,7 @@ A default style can be configured by specifying an `args` parameter in your `sol
 
 This transformer returns all <<indexing-nested-documents.adoc#,descendant documents>> of each parent document matching your query.  This is useful when you have indexed nested child documents and want to retrieve the child documents for the relevant parent documents for any type of search query.
 
-Note that this transformer can be used even when the query used to match the result documents is not a <<other-parsers.adoc#block-join-query-parsers,Block Join query>>.
+Note that this transformer can be used even when the query used to match the result documents is not a <<block-join-query-parser.adoc#,Block Join query>>.
 
 
 [source,plain]
@@ -198,7 +198,7 @@ DocIdAugmenterFactory does not support any request parameters, or configuration
 
 === [elevated] and [excluded]
 
-These transformers are available only when using the <<the-query-elevation-component.adoc#,Query Elevation Component>>.
+These transformers are available only when using the <<query-elevation-component.adoc#,Query Elevation Component>>.
 
 * `[elevated]` annotates each document to indicate if it was elevated or not.
 * `[excluded]` annotates each document to indicate if it would have been excluded - this is only supported if you also use the `markExcludes` parameter.
@@ -318,7 +318,7 @@ q=*:*&fl=*,**foo**:[subquery]&**foo.**q=to be continued&**foo.**rows=10&**foo.**
 
 ==== Document Field as an Input for Subquery Parameters
 
-It's necessary to pass some document field values as a parameter for subquery. It's supported via an implicit *`row.__fieldname__`* parameter, and can be (but might not only) referred via local parameters syntax:
+It may be necessary to pass some document field values as a parameter for the subquery.
+This is supported via an implicit *`row.__fieldname__`* parameter, which can (among other ways) be referenced via local params syntax:
 
 [source,plain,subs="quotes"]
 q=name:john&fl=name,id,depts:[subquery]&depts.q={!terms f=id **v=$row.dept_id**}&depts.rows=10
@@ -331,7 +331,9 @@ To log substituted subquery request parameters, add the corresponding parameter
 
 ==== Cores and Collections in SolrCloud
 
-Use `foo:[subquery fromIndex=departments]` to invoke subquery on another core on the same node. This is what `{!join}` does for non-SolrCloud mode. But with SolrCloud, just (and only) explicitly specify its native parameters like `collection, shards` for subquery, for example:
+Use `foo:[subquery fromIndex=departments]` to invoke a subquery on another core on the same node.
+This is what `{!join}` does in a user-managed cluster.
+With SolrCloud, just explicitly specify its native parameters, such as `collection` and `shards`, for the subquery, for example:
 
 [source,plain,subs="quotes"]
 q=\*:*&fl=\*,foo:[subquery]&foo.q=cloud&**foo.collection**=departments
diff --git a/solr/solr-ref-guide/src/documents-fields-and-schema-design.adoc b/solr/solr-ref-guide/src/documents-fields-and-schema-design.adoc
deleted file mode 100644
index 04c4ae5..0000000
--- a/solr/solr-ref-guide/src/documents-fields-and-schema-design.adoc
+++ /dev/null
@@ -1,44 +0,0 @@
-= Documents, Fields, and Schema Design
-:page-children: overview-of-documents-fields-and-schema-design, solr-field-types, defining-fields, copying-fields, dynamic-fields, other-schema-elements, schema-api, putting-the-pieces-together, docvalues, schemaless-mode, luke-request-handler
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-This section discusses how Solr organizes its data into documents and fields, as well as how to work with a schema in Solr.
-
-This section includes the following topics:
-
-<<overview-of-documents-fields-and-schema-design.adoc#,Overview of Documents, Fields, and Schema Design>>: An introduction to the concepts covered in this section.
-
-<<solr-field-types.adoc#,Solr Field Types>>: Detailed information about field types in Solr, including the field types in the default Solr schema.
-
-<<defining-fields.adoc#,Defining Fields>>: Describes how to define fields in Solr.
-
-<<copying-fields.adoc#,Copying Fields>>: Describes how to populate fields with data copied from another field.
-
-<<dynamic-fields.adoc#,Dynamic Fields>>: Information about using dynamic fields in order to catch and index fields that do not exactly conform to other field definitions in your schema.
-
-<<schema-api.adoc#,Schema API>>: Use curl commands to read various parts of a schema or create new fields and copyField rules.
-
-<<other-schema-elements.adoc#,Other Schema Elements>>: Describes other important elements in the Solr schema.
-
-<<putting-the-pieces-together.adoc#,Putting the Pieces Together>>: A higher-level view of the Solr schema and how its elements work together.
-
-<<docvalues.adoc#,DocValues>>: Describes how to create a docValues index for faster lookups.
-
-<<schemaless-mode.adoc#,Schemaless Mode>>: Automatically add previously unknown schema fields using value-based field type guessing.
-
-<<luke-request-handler.adoc#,Luke Request Handler>>: The request handler which provides access to information about fields in the index. This request handler powers the <<schema-browser-screen.adoc#,Schema Browser>> page of Solr's Admin UI.
diff --git a/solr/solr-ref-guide/src/documents-fields-schema-design.adoc b/solr/solr-ref-guide/src/documents-fields-schema-design.adoc
new file mode 100644
index 0000000..33f5ac5
--- /dev/null
+++ b/solr/solr-ref-guide/src/documents-fields-schema-design.adoc
@@ -0,0 +1,99 @@
+= Documents, Fields, and Schema Design
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+[.lead]
+The fundamental premise of Solr is simple.
+You give it a lot of information, then later you can ask it questions and find the piece of information you want.
+
+The part where you feed in all the information is called _indexing_ or _updating_.
+When you ask a question, it's called a _query_.
+
+One way to understand how Solr works is to think of a loose-leaf book of recipes.
+Every time you add a recipe to the book, you update the index at the back.
+You list each ingredient and the page number of the recipe you just added.
+Suppose you add one hundred recipes.
+Using the index, you can very quickly find all the recipes that use garbanzo beans, or artichokes, or coffee, as an ingredient.
+Using the index is much faster than looking through each recipe one by one.
+Imagine a book of one thousand recipes, or one million.
+
+Solr allows you to build an index with many different fields, or types of entries.
+The example above shows how to build an index with just one field, `ingredients`.
+You could have other fields in the index for the recipe's cooking style, like `Asian`, `Cajun`, or `vegan`, and you could have an index field for preparation times.
+Solr can answer questions like "Which Cajun-style recipes that have blood oranges as an ingredient can be prepared in fewer than 30 minutes?"
+
+The schema is the place where you tell Solr how it should build indexes from input documents.
+
+== How Solr Sees the World
+
+Solr's basic unit of information is a _document_, which is a set of data that describes something.
+A recipe document would contain the ingredients, the instructions, the preparation time, the cooking time, the tools needed, and so on.
+A document about a person, for example, might contain the person's name, biography, favorite color, and shoe size.
+A document about a book could contain the title, author, year of publication, number of pages, and so on.
+
+In the Solr universe, documents are composed of _fields_, which are more specific pieces of information.
+Shoe size could be a field.
+First name and last name could be fields.
+
+Fields can contain different kinds of data.
+A name field, for example, is text (character data).
+A shoe size field might be a floating point number so that it could contain values like 6 and 9.5.
+Obviously, the definition of fields is flexible (you could define a shoe size field as a text field rather than a floating point number, for example), but if you define your fields correctly, Solr will be able to interpret them correctly and your users will get better results when they perform a query.
+
+You can tell Solr about the kind of data a field contains by specifying its _field type_.
+The field type tells Solr how to interpret the field and how it can be queried.
+
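+For example, the name and shoe size fields discussed above might be declared in the schema roughly like this (the field and type names are only illustrative):
+
+[source,xml]
+----
+<!-- character data, run through text analysis -->
+<field name="first_name" type="text_general" indexed="true" stored="true"/>
+<!-- a floating point number, so values like 6 and 9.5 both work -->
+<field name="shoe_size" type="pfloat" indexed="true" stored="true"/>
+----
+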
+When you add a document, Solr takes the information in the document's fields and adds that information to an index.
+When you perform a query, Solr can quickly consult the index and return the matching documents.
+
+== Field Analysis
+
+_Field analysis_ tells Solr what to do with incoming data when building an index.
+A more accurate name for this process would be _processing_ or even _digestion_, but the official name is _analysis_.
+
+Consider, for example, a biography field in a person document.
+Every word of the biography must be indexed so that you can quickly find people whose lives have had anything to do with ketchup, or dragonflies, or cryptography.
+
+However, a biography will likely contain lots of words you don't care about and don't want clogging up your index—words like "the", "a", "to", and so forth.
+Furthermore, suppose the biography contains the word "Ketchup", capitalized at the beginning of a sentence.
+If a user makes a query for "ketchup", you want Solr to tell you about the person even though the biography contains the capitalized word.
+
+The solution to both these problems is field analysis.
+For the biography field, you can tell Solr how to break apart the biography into words.
+You can tell Solr that you want to make all the words lower case, and you can tell Solr to remove accent marks.
+
+Field analysis is an important part of a field type.
+<<document-analysis.adoc#,Document Analysis in Solr>> is a detailed description of field analysis.
+
+== Solr's Schema File
+
+Solr stores details about the field types and fields it is expected to understand in a schema file.
+This file is named either `managed-schema` or `schema.xml`.
+The difference is determined by how you plan to manage Solr's schema in your installation: either programmatically or by hand-editing.
+
+An important fact about the schema is that it is a Solr concept.
+The actual data in your index is stored in Lucene, and Lucene does not have the concept of a schema.
+This means that changes to the schema file will not have any impact on data already stored in the index.
+In fact, changing the schema without reindexing your data can cause serious problems with the index, to the point where the only solution is to reindex your data entirely.
+
+It is good practice to try to think about the data you plan to index: what kind of data is it?
+How do you want to be able to search it?
+How will fields from documents be displayed to users?
+
+If you aren't sure yet, plan on some test indexing runs to see how the data in your documents is indexed with default settings.
+Build into your implementation plan some time for iteration and start small.
+The more you're able to define your schema before indexing all of your documents, the higher your chances for a successful search application for your users.  
+
+More information about the schema is in the section <<schema-elements.adoc#,Schema Elements>>.
diff --git a/solr/solr-ref-guide/src/documents-screen.adoc b/solr/solr-ref-guide/src/documents-screen.adoc
index d6f5d40..15fe607 100644
--- a/solr/solr-ref-guide/src/documents-screen.adoc
+++ b/solr/solr-ref-guide/src/documents-screen.adoc
@@ -31,14 +31,14 @@ The screen allows you to:
 ====
 There are other ways to load data, see also these sections:
 
-* <<uploading-data-with-index-handlers.adoc#,Uploading Data with Index Handlers>>
-* <<uploading-data-with-solr-cell-using-apache-tika.adoc#,Uploading Data with Solr Cell using Apache Tika>>
+* <<indexing-with-update-handlers.adoc#,Indexing with Update Handlers>>
+* <<indexing-with-tika.adoc#,Indexing with Solr Cell and Apache Tika>>
 ====
 
 == Common Fields
 * Request-Handler: The first step is to define the RequestHandler. By default `/update` will be defined. Change the request handler to `/update/extract` to use Solr Cell.
 * Document Type: Select the Document Type to define the format of document to load. The remaining parameters may change depending on the document type selected.
-* Document(s): Enter a properly-formatted Solr document corresponding to the `Document Type` selected. XML and JSON documents must be formatted in a Solr-specific format, a small illustrative document will be shown. CSV files should have headers corresponding to fields defined in the schema. More details can be found at: <<uploading-data-with-index-handlers.adoc#,Uploading Data with Index Handlers>>.
+* Document(s): Enter a properly-formatted Solr document corresponding to the `Document Type` selected. XML and JSON documents must be formatted in a Solr-specific format, a small illustrative document will be shown. CSV files should have headers corresponding to fields defined in the schema. More details can be found at: <<indexing-with-update-handlers.adoc#,Indexing with Update Handlers>>.
 * Commit Within: Specify the number of milliseconds between the time the document is submitted and when it is available for searching.
 * Overwrite: If `true` the new document will replace an existing document with the same value in the `id` field. If `false` multiple documents with the same id can be added.
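+
+For reference, the Solr-specific XML format mentioned above might look like this minimal sketch (the `id` and `title` values are placeholders); Commit Within and Overwrite can also be expressed as `commitWithin` and `overwrite` attributes on the `<add>` element:
+
+[source,xml]
+----
+<add commitWithin="5000" overwrite="true">
+  <doc>
+    <field name="id">change.me</field>
+    <field name="title">A sample document</field>
+  </doc>
+</add>
+----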
 
@@ -62,7 +62,7 @@ The Document Builder provides a wizard-like interface to enter fields of a docum
 The File Upload option allows choosing a prepared file and uploading it. If using `/update` for the Request-Handler option, you will be limited to XML, CSV, and JSON.
 
 Other document types (e.g., Word, PDF, etc.) can be indexed using the ExtractingRequestHandler (aka, Solr Cell). You must modify the RequestHandler to `/update/extract`, which must be defined in your `solrconfig.xml` file with your desired defaults. You should also add `&literal.id` shown in the "Extracting Request Handler Params" field so the file chosen is given a unique id.
-More information can be found at:  <<uploading-data-with-solr-cell-using-apache-tika.adoc#,Uploading Data with Solr Cell using Apache Tika>>
+More information can be found at: <<indexing-with-tika.adoc#,Indexing with Solr Cell and Apache Tika>>.
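+
+As a rough sketch (the defaults shown are illustrative, not required), such a definition in `solrconfig.xml` might look like:
+
+[source,xml]
+----
+<requestHandler name="/update/extract" startup="lazy"
+                class="solr.extraction.ExtractingRequestHandler">
+  <lst name="defaults">
+    <!-- lowercase extracted field names -->
+    <str name="lowernames">true</str>
+    <!-- map extracted body content to a catch-all field -->
+    <str name="fmap.content">_text_</str>
+  </lst>
+</requestHandler>
+----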
 
 == Solr Command
 
diff --git a/solr/solr-ref-guide/src/docvalues.adoc b/solr/solr-ref-guide/src/docvalues.adoc
index 2f05d08..ecf4a6f 100644
--- a/solr/solr-ref-guide/src/docvalues.adoc
+++ b/solr/solr-ref-guide/src/docvalues.adoc
@@ -77,9 +77,16 @@ If `docValues="true"` for a field, then DocValues will automatically be used any
 
 === Retrieving DocValues During Search
 
-Field values retrieved during search queries are typically returned from stored values. However, non-stored docValues fields will be also returned along with other stored fields when all fields (or pattern matching globs) are specified to be returned (e.g., "`fl=*`") for search queries depending on the effective value of the `useDocValuesAsStored` parameter for each field. For schema versions >= 1.6, the implicit default is `useDocValuesAsStored="true"`. See <<field-type-definitions-and- [...]
+Field values retrieved during search queries are typically returned from stored values.
+However, non-stored docValues fields will also be returned along with other stored fields when all fields (or pattern matching globs) are specified to be returned (e.g., "`fl=*`") for search queries, depending on the effective value of the `useDocValuesAsStored` parameter for each field.
+For schema versions >= 1.6, the implicit default is `useDocValuesAsStored="true"`.
+See <<field-type-definitions-and-properties.adoc#,Field Type Definitions and Properties>> & <<fields.adoc#,Fields>> for more details.
 
-When `useDocValuesAsStored="false"`, non-stored DocValues fields can still be explicitly requested by name in the <<common-query-parameters.adoc#fl-field-list-parameter,`fl` parameter>>, but will not match glob patterns (`"*"`). Note that returning DocValues along with "regular" stored fields at query time has performance implications that stored fields may not because DocValues are column-oriented and may therefore incur additional cost to retrieve for each returned document. Also note  [...]
+When `useDocValuesAsStored="false"`, non-stored DocValues fields can still be explicitly requested by name in the <<common-query-parameters.adoc#fl-field-list-parameter,`fl` parameter>>, but will not match glob patterns (`"*"`).
+
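+As an illustrative sketch (the field name is a placeholder), such a field might be declared like this:
+
+[source,xml]
+----
+<!-- docValues only: retrievable when named explicitly in fl, but excluded from fl=* -->
+<field name="manu_exact" type="string" indexed="true" stored="false"
+       docValues="true" useDocValuesAsStored="false"/>
+----
+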
+Returning DocValues along with "regular" stored fields at query time has performance implications that stored fields may not because DocValues are column-oriented and may therefore incur additional cost to retrieve for each returned document.
+
+When returning non-stored fields from DocValues, the values of a multi-valued field are returned in sorted order rather than insertion order and may have duplicates removed; see above.
+If you require the multi-valued fields to be returned in the original insertion order, then make your multi-valued field stored (such a change requires reindexing).
 
 In cases where the query is returning _only_ docValues fields performance may improve since returning stored fields requires disk reads and decompression whereas returning docValues fields in the fl list only requires memory access.
 
diff --git a/solr/solr-ref-guide/src/dynamic-fields.adoc b/solr/solr-ref-guide/src/dynamic-fields.adoc
index 65a91ff..873bd22 100644
--- a/solr/solr-ref-guide/src/dynamic-fields.adoc
+++ b/solr/solr-ref-guide/src/dynamic-fields.adoc
@@ -18,11 +18,14 @@
 
 _Dynamic fields_ allow Solr to index fields that you did not explicitly define in your schema.
 
-This is useful if you discover you have forgotten to define one or more fields. Dynamic fields can make your application less brittle by providing some flexibility in the documents you can add to Solr.
+This is useful if you discover you have forgotten to define one or more fields.
+Dynamic fields can make your application less brittle by providing some flexibility in the documents you can add to Solr.
 
-A dynamic field is just like a regular field except it has a name with a wildcard in it. When you are indexing documents, a field that does not match any explicitly defined fields can be matched with a dynamic field.
+A dynamic field is just like a regular field except it has a name with a wildcard in it.
+When you are indexing documents, a field that does not match any explicitly defined fields can be matched with a dynamic field.
 
-For example, suppose your schema includes a dynamic field with a name of `*_i`. If you attempt to index a document with a `cost_i` field, but no explicit `cost_i` field is defined in the schema, then the `cost_i` field will have the field type and analysis defined for `*_i`.
+For example, suppose your schema includes a dynamic field with a name of `*_i`.
+If you attempt to index a document with a `cost_i` field, but no explicit `cost_i` field is defined in the schema, then the `cost_i` field will have the field type and analysis defined for `*_i`.
 
 Like regular fields, dynamic fields have a name, a field type, and options.
 
@@ -31,4 +34,5 @@ Like regular fields, dynamic fields have a name, a field type, and options.
 <dynamicField name="*_i" type="int" indexed="true"  stored="true"/>
 ----
 
-It is recommended that you include basic dynamic field mappings (like that shown above) in your `schema.xml`. The mappings can be very useful.
+It is recommended that you include basic dynamic field mappings (like that shown above) in your `schema.xml`.
+The mappings can be very useful.
diff --git a/solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc b/solr/solr-ref-guide/src/edismax-query-parser.adoc
similarity index 67%
rename from solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc
rename to solr/solr-ref-guide/src/edismax-query-parser.adoc
index 7f1c4d7..c015ac7 100644
--- a/solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc
+++ b/solr/solr-ref-guide/src/edismax-query-parser.adoc
@@ -1,4 +1,4 @@
-= The Extended DisMax (eDismax) Query Parser
+= Extended DisMax (eDisMax) Query Parser
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -16,30 +16,35 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The Extended DisMax (eDisMax) query parser is an improved version of the <<the-dismax-query-parser.adoc#,DisMax query parser>>.
+The Extended DisMax (eDisMax) query parser is an improved version of the <<dismax-query-parser.adoc#,DisMax query parser>>.
 
-In addition to supporting all the DisMax query parser parameters, Extended Dismax:
+In addition to supporting all the DisMax query parser parameters, Extended DisMax:
 
-* supports <<the-standard-query-parser.adoc#,Solr's standard query parser>> syntax such as (non-exhaustive list):
+* supports <<standard-query-parser.adoc#,Solr's standard query parser>> syntax such as (non-exhaustive list):
 ** boolean operators such as AND (+, &&), OR (||), NOT (-).
 ** optionally treats lowercase "and" and "or" as "AND" and "OR" in Lucene syntax mode
 ** optionally allows embedded queries using other query parsers or functions
 * includes improved smart partial escaping in the case of syntax errors; fielded queries, +/-, and phrase queries are still supported in this mode.
 * improves proximity boosting by using word shingles; you do not need the query to match all words in the document before proximity boosting is applied.
-* includes advanced stopword handling: stopwords are not required in the mandatory part of the query but are still used in the proximity boosting part. If a query consists of all stopwords, such as "to be or not to be", then all words are required.
-* includes improved boost function: in Extended DisMax, the `boost` function is a multiplier <<the-dismax-query-parser.adoc#bq-bf-shortcomings,rather than an addend>>, improving your boost results; the additive boost functions of DisMax (`bf` and `bq`) are also supported.
+* includes advanced stopword handling: stopwords are not required in the mandatory part of the query but are still used in the proximity boosting part.
+If a query consists of all stopwords, such as "to be or not to be", then all words are required.
+* includes improved boost function: in Extended DisMax, the `boost` function is a multiplier <<dismax-query-parser.adoc#bq-bf-shortcomings,rather than an addend>>, improving your boost results; the additive boost functions of DisMax (`bf` and `bq`) are also supported.
 * supports pure negative nested queries: queries such as `+foo (-foo)` will match all documents.
 * lets you specify which fields the end user is allowed to query, and to disallow direct fielded searches.
 
 == Extended DisMax Parameters
 
-In addition to all the <<the-dismax-query-parser.adoc#dismax-query-parser-parameters,DisMax parameters>>, Extended DisMax includes these query parameters:
+In addition to all the <<dismax-query-parser.adoc#dismax-query-parser-parameters,DisMax parameters>>, Extended DisMax includes these query parameters:
 
 `sow`::
-Split on whitespace. If set to `true`, text analysis is invoked separately for each individual whitespace-separated term.  The default is `false`; whitespace-separated term sequences will be provided to text analysis in one shot, enabling proper function of analysis filters that operate over term sequences, e.g., multi-word synonyms and shingles.
+Split on whitespace.
+If set to `true`, text analysis is invoked separately for each individual whitespace-separated term.
+The default is `false`; whitespace-separated term sequences will be provided to text analysis in one shot, enabling proper function of analysis filters that operate over term sequences, e.g., multi-word synonyms and shingles.
 
 `mm`::
- Minimum should match.  See the <<the-dismax-query-parser.adoc#mm-minimum-should-match-parameter,DisMax mm parameter>> for a description of `mm`. The default eDisMax `mm` value differs from that of DisMax:
+Minimum should match.
+See the <<dismax-query-parser.adoc#mm-minimum-should-match-parameter,DisMax mm parameter>> for a description of `mm`.
+The default eDisMax `mm` value differs from that of DisMax:
 +
 * The default `mm` value is 0%:
 ** if the query contains an explicit operator other than "AND" ("-", "+", "OR", "NOT"); or
@@ -47,12 +52,14 @@ Split on whitespace. If set to `true`, text analysis is invoked separately for e
 * The default `mm` value is 100% if `q.op` is "AND" and the query does not contain any explicit operators other than "AND".
 
 `mm.autoRelax`::
-If `true`, the number of clauses required (<<the-dismax-query-parser.adoc#mm-minimum-should-match-parameter,minimum should match>>) will automatically be relaxed if a clause is removed (by e.g., stopwords filter) from some but not all <<the-dismax-query-parser.adoc#qf-query-fields-parameter,`qf`>> fields. Use this parameter as a workaround if you experience that queries return zero hits due to uneven stopword removal between the `qf` fields.
+If `true`, the number of clauses required (<<dismax-query-parser.adoc#mm-minimum-should-match-parameter,minimum should match>>) will automatically be relaxed if a clause is removed (by e.g., stopwords filter) from some but not all <<dismax-query-parser.adoc#qf-query-fields-parameter,`qf`>> fields.
+Use this parameter as a workaround if you experience that queries return zero hits due to uneven stopword removal between the `qf` fields.
 +
 Note that relaxing `mm` may cause undesired side effects, such as hurting the precision of the search, depending on the nature of your index content.
 
 `boost`::
-A multivalued list of strings parsed as <<function-queries.adoc#available-functions,functions>> whose results will be multiplied into the score from the main query for all matching documents. This parameter is shorthand for wrapping the query produced by eDisMax using the <<other-parsers.adoc#boost-query-parser,`BoostQParserPlugin`>>.
+A multivalued list of strings parsed as <<function-queries.adoc#available-functions,functions>> whose results will be multiplied into the score from the main query for all matching documents.
+This parameter is shorthand for wrapping the query produced by eDisMax using the <<other-parsers.adoc#boost-query-parser,`BoostQParserPlugin`>>.
 
 These two examples are equivalent:
 [source,text]
@@ -71,27 +78,32 @@ A Boolean parameter indicating if lowercase "and" and "or" should be treated the
 Defaults to `false`.
 
 `ps`::
-Phrase Slop. The default amount of slop - distance between terms - on phrase queries built with `pf`, `pf2` and/or `pf3` fields (affects boosting). See also the section <<Using 'Slop'>> below.
+Phrase Slop.
+The default amount of slop - distance between terms - on phrase queries built with `pf`, `pf2` and/or `pf3` fields (affects boosting).
+See also the section <<Using 'Slop'>> below.
 
 `pf2`::
 
-A multivalued list of fields with optional weights. Similar to `pf`, but based on word _pair_ shingles.
+A multivalued list of fields with optional weights.
+Similar to `pf`, but based on word _pair_ shingles.
 
 `ps2`::
 This is similar to `ps` but overrides the slop factor used for `pf2`. If not specified, `ps` is used.
 
 `pf3`::
-A multivalued list of fields with optional weights, based on triplets of word shingles. Similar to `pf`, except that instead of building a phrase per field out of all the words in the input, it builds a set of phrases for each field out of word _triplet_ shingles.
+A multivalued list of fields with optional weights, based on triplets of word shingles.
+Similar to `pf`, except that instead of building a phrase per field out of all the words in the input, it builds a set of phrases for each field out of word _triplet_ shingles.
 
 `ps3`::
 This is similar to `ps` but overrides the slop factor used for `pf3`. If not specified, `ps` is used.
 
 `stopwords`::
-A Boolean parameter indicating if the `StopFilterFactory` configured in the query analyzer should be respected when parsing the query. If this is set to `false`, then the `StopFilterFactory` in the query analyzer is ignored.
+A Boolean parameter indicating if the `StopFilterFactory` configured in the query analyzer should be respected when parsing the query.
+If this is set to `false`, then the `StopFilterFactory` in the query analyzer is ignored.
 
 `uf`::
 Specifies which schema fields the end user is allowed to explicitly query and to toggle whether embedded Solr queries are supported.
-This parameter supports wildcards. Multiple fields must be separated by a space.
+This parameter supports wildcards.
+Multiple fields must be separated by a space.
 +
 The default is to allow all fields and no embedded Solr queries, equivalent to `uf=* -\_query_`.
 
@@ -104,9 +116,10 @@ The default is to allow all fields and no embedded Solr queries, equivalent to `
 
 === Field Aliasing using Per-Field qf Overrides
 
-Per-field overrides of the `qf` parameter may be specified to provide 1-to-many aliasing from field names specified in the query string, to field names used in the underlying query. By default, no aliasing is used and field names specified in the query string are treated as literal field names in the index.
+Per-field overrides of the `qf` parameter may be specified to provide 1-to-many aliasing from field names specified in the query string, to field names used in the underlying query.
+By default, no aliasing is used and field names specified in the query string are treated as literal field names in the index.
 
-== Examples of eDismax Queries
+== Examples of eDisMax Queries
 
 All of the sample URLs in this section assume you are running Solr's "```techproducts```" example:
 
@@ -164,7 +177,9 @@ f.name.qf=last_name first_name
 
 == Using 'Slop'
 
-`Dismax` and `Edismax` can run queries against all query fields, and also run a query in the form of a phrase against the phrase fields. (This will work only for boosting documents, not actually for matching.) However, that phrase query can have a 'slop,' which is the distance between the terms of the query while still considering it a phrase match. For example:
+DisMax and eDisMax can run queries against all query fields, and also run a query in the form of a phrase against the phrase fields (this will work only for boosting documents, not actually for matching).
+However, that phrase query can have a 'slop,' which is the distance between the terms of the query while still considering it a phrase match.
+For example:
 
 [source,text]
 ----
@@ -174,7 +189,7 @@ pf=field1^50 field2^20
 defType=dismax
 ----
 
-With these parameters, the Dismax Query Parser generates a query that looks something like this:
+With these parameters, the DisMax Query Parser generates a query that looks something like this:
 
 [source,text]
 ----
@@ -197,7 +212,8 @@ If you add the parameter `ps` (phrase slop), the second query will instead be:
 ps=10 field1:"foo bar"~10^50 OR field2:"foo bar"~10^20
 ----
 
-This means that if the terms "foo" and "bar" appear in the document with less than 10 terms between each other, the phrase will match. For example the doc that says:
+This means that if the terms "foo" and "bar" appear in the document with fewer than 10 terms between them, the phrase will match.
+For example, the doc that says:
 
 [source,text]
 ----
@@ -208,20 +224,25 @@ will match the phrase query.
 
 How does one use phrase slop? Usually it is configured in the request handler (in `solrconfig`).
 
-With query slop (`qs`) the concept is similar, but it applies to explicit phrase queries from the user. For example, if you want to search for a name, you could enter:
+With query slop (`qs`) the concept is similar, but it applies to explicit phrase queries from the user.
+For example, if you want to search for a name, you could enter:
 
 [source,text]
 ----
 q="Hans Anderson"
 ----
 
-A document that contains "Hans Anderson" will match, but a document that contains the middle name "Christian" or where the name is written with the last name first ("Anderson, Hans") won't. For those cases one could configure the query field `qs`, so that even if the user searches for an explicit phrase query, a slop is applied.
+A document that contains "Hans Anderson" will match, but a document that contains the middle name "Christian" or where the name is written with the last name first ("Anderson, Hans") won't.
+For those cases one could configure the query field `qs`, so that even if the user searches for an explicit phrase query, a slop is applied.
 
-Finally, in addition to the phrase fields (`pf`) parameter, `edismax` also supports the `pf2` and `pf3` parameters, for fields over which to create bigram and trigram phrase queries. The phrase slop for these parameters' queries can be specified using the `ps2` and `ps3` parameters, respectively. If you use `pf2`/`pf3` but not `ps2`/`ps3`, then the phrase slop for these parameters' queries will be taken from the `ps` parameter, if any.
+Finally, in addition to the phrase fields (`pf`) parameter, `edismax` also supports the `pf2` and `pf3` parameters, for fields over which to create bigram and trigram phrase queries.
+The phrase slop for these parameters' queries can be specified using the `ps2` and `ps3` parameters, respectively.
+If you use `pf2`/`pf3` but not `ps2`/`ps3`, then the phrase slop for these parameters' queries will be taken from the `ps` parameter, if any.
 
 === Synonyms Expansion in Phrase Queries with Slop
 
-When a phrase query with slop (e.g., `pf` with `ps`) triggers synonym expansions, a separate clause will be generated for each combination of synonyms. For example, with configured synonyms `dog,canine` and `cat,feline`, the query `"dog chased cat"` will generate the following phrase query clauses:
+When a phrase query with slop (e.g., `pf` with `ps`) triggers synonym expansions, a separate clause will be generated for each combination of synonyms.
+For example, with configured synonyms `dog,canine` and `cat,feline`, the query `"dog chased cat"` will generate the following phrase query clauses:
 
 * `"dog chased cat"`
 * `"canine chased cat"`
diff --git a/solr/solr-ref-guide/src/enabling-ssl.adoc b/solr/solr-ref-guide/src/enabling-ssl.adoc
index 95e9cd8..b2de1a2 100644
--- a/solr/solr-ref-guide/src/enabling-ssl.adoc
+++ b/solr/solr-ref-guide/src/enabling-ssl.adoc
@@ -31,7 +31,8 @@ This keystore will also be used as a truststore below.
 It's possible to use the keystore that comes with the JDK for these purposes, and to use a separate truststore, but those options aren't covered here.
 
 Run the commands below in the `server/etc/` directory in the binary Solr distribution.
-It's assumed that you have the JDK `keytool` utility on your `PATH`, and that `openssl` is also on your `PATH`. See https://www.openssl.org/related/binaries.html for OpenSSL binaries for Windows and Solaris.
+It's assumed that you have the JDK `keytool` utility on your `PATH`, and that `openssl` is also on your `PATH`.
+See https://www.openssl.org/related/binaries.html for OpenSSL binaries for Windows and Solaris.
 
 The `-ext SAN=...` `keytool` option allows you to specify all the DNS names and/or IP addresses that will be allowed during hostname verification if you choose to require it.
 
@@ -120,11 +121,13 @@ set SOLR_SSL_CHECK_PEER_NAME=true
 --
 
 .Client Authentication Settings
-WARNING: Enable either `SOLR_SSL_NEED_CLIENT_AUTH` or `SOLR_SSL_WANT_CLIENT_AUTH` but not both at the same time. They are mutually exclusive and Jetty will select one of them which may not be what you expect. `SOLR_SSL_CLIENT_HOSTNAME_VERIFICATION` should be set to false if you want to disable hostname verification.
+WARNING: Enable either `SOLR_SSL_NEED_CLIENT_AUTH` or `SOLR_SSL_WANT_CLIENT_AUTH` but not both at the same time.
+They are mutually exclusive, and Jetty will select one of them, which may not be what you expect.
+`SOLR_SSL_CLIENT_HOSTNAME_VERIFICATION` should be set to false if you want to disable hostname verification.
 
 When you start Solr, the `bin/solr` script includes these settings and will pass them as system properties to the JVM.
 
-If you are running Solr in standalone mode, you can skip to <<Run Single Node Solr using SSL>>.
+If you are running Solr in a user-managed cluster or single-node installation, you can skip to <<Start User-Managed Cluster or Single-Node Solr>>.
 
 If you are using SolrCloud, however, you need to <<Configure ZooKeeper>> before starting Solr.
 
@@ -155,7 +158,8 @@ Note that if the `javax.net.ssl.\*` configurations are not set, they will fallba
 Solr requires three parameters to be configured in order to use the credential store file for keystore passwords.
 
 `solr.ssl.credential.provider.chain`::
-The credential provider chain. This should be set to `hadoop`.
+The credential provider chain.
+This should be set to `hadoop`.
 
 `SOLR_HADOOP_CREDENTIAL_PROVIDER_PATH`::
 The path to the credential store file.
@@ -190,12 +194,13 @@ set HADOOP_CREDSTORE_PASSWORD="credStorePass123"
 
 === Configure ZooKeeper
 
-NOTE: ZooKeeper does not support encrypted communication with clients like Solr. There are several related JIRA tickets where SSL support is being planned/worked on: https://issues.apache.org/jira/browse/ZOOKEEPER-235[ZOOKEEPER-235]; https://issues.apache.org/jira/browse/ZOOKEEPER-236[ZOOKEEPER-236]; https://issues.apache.org/jira/browse/ZOOKEEPER-1000[ZOOKEEPER-1000]; and https://issues.apache.org/jira/browse/ZOOKEEPER-2120[ZOOKEEPER-2120].
+NOTE: ZooKeeper does not support encrypted communication with clients like Solr.
+There are several related JIRA tickets where SSL support is being planned/worked on: https://issues.apache.org/jira/browse/ZOOKEEPER-235[ZOOKEEPER-235]; https://issues.apache.org/jira/browse/ZOOKEEPER-236[ZOOKEEPER-236]; https://issues.apache.org/jira/browse/ZOOKEEPER-1000[ZOOKEEPER-1000]; and https://issues.apache.org/jira/browse/ZOOKEEPER-2120[ZOOKEEPER-2120].
 
 After creating the keystore described above and before you start any SolrCloud nodes, you must configure your Solr cluster properties in ZooKeeper so that Solr nodes know to communicate via SSL.
 
 This section assumes you have created and started an external ZooKeeper.
-See <<setting-up-an-external-zookeeper-ensemble.adoc#,Setting Up an External ZooKeeper Ensemble>> for more information.
+See <<zookeeper-ensemble.adoc#,ZooKeeper Ensemble>> for more information.
 
 The `urlScheme` cluster-wide property needs to be set to `https` before any Solr node starts up.
 The examples below use the `zkcli` tool that comes with Solr to do this.
@@ -221,7 +226,8 @@ C:\> server\scripts\cloud-scripts\zkcli.bat -zkhost server1:2181,server2:2181,se
 =====
 --
 
-Be sure to use the correct `zkhost` value for your system. If you have set up your ZooKeeper ensemble to use a <<taking-solr-to-production.adoc#zookeeper-chroot,chroot for Solr>>, make sure to include it in the `zkhost` string, e.g., `-zkhost server1:2181,server2:2181,server3:2181/solr`.
+Be sure to use the correct `zkhost` value for your system.
+If you have set up your ZooKeeper ensemble to use a <<taking-solr-to-production.adoc#zookeeper-chroot,chroot for Solr>>, make sure to include it in the `zkhost` string, e.g., `-zkhost server1:2181,server2:2181,server3:2181/solr`.
 
 === Update Cluster Properties for Existing Collections
 
@@ -240,7 +246,7 @@ Once this and all other steps are complete, you can go ahead and start Solr.
 
 == Starting Solr After Enabling SSL
 
-=== Run Single Node Solr using SSL
+=== Start User-Managed Cluster or Single-Node Solr
 
 Start Solr using the Solr control script as shown in the examples below.
 Customize the values for the parameters shown as needed and add any used in your system.
@@ -266,11 +272,12 @@ C:\> bin\solr.cmd -p 8984
 ====
 --
 
-=== Run SolrCloud with SSL
+=== Start SolrCloud
 
-NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see <<setting-up-an-external-zookeeper-ensemble#updating-solr-include-files,instructions>>) you can omit `-z <zk host string>` from all of the `bin/solr`/`bin\solr.cmd` commands below.
+NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see <<zookeeper-ensemble#updating-solr-include-files,instructions>>) you can omit `-z <zk host string>` from all of the `bin/solr`/`bin\solr.cmd` commands below.
 
-Start each Solr node with the Solr control script as shown in the examples below. Customize the values for the parameters shown as necessary and add any used in your system.
+Start each Solr node with the Solr control script as shown in the examples below.
+Customize the values for the parameters shown as necessary and add any used in your system.
 
 If you created the SSL key without all DNS names or IP addresses on which Solr nodes run, you can tell Solr to skip hostname verification for inter-node communications by setting the `-Dsolr.ssl.checkPeerName=false` system property.
 
@@ -300,9 +307,12 @@ C:\> bin\solr.cmd -cloud -s cloud\node1 -z server1:2181,server2:2181,server3:218
 
 [IMPORTANT]
 ====
-curl on OS X Mavericks (10.9) has degraded SSL support. For more information and workarounds to allow one-way SSL, see https://curl.se/mail/archive-2013-10/0036.html. curl on OS X Yosemite (10.10) is improved - 2-way SSL is possible - see https://curl.se/mail/archive-2014-10/0053.html.
+curl on OS X Mavericks (10.9) has degraded SSL support.
+For more information and workarounds to allow one-way SSL, see https://curl.se/mail/archive-2013-10/0036.html.
+curl on OS X Yosemite (10.10) is improved - 2-way SSL is possible - see https://curl.se/mail/archive-2014-10/0053.html.
 
-The curl commands in the following sections will not work with the system `curl` on OS X Yosemite (10.10). Instead, the certificate supplied with the `-E` parameter must be in PKCS12 format, and the file supplied with the `--cacert` parameter must contain only the CA certificate, and no key (see <<Convert the Certificate and Key to PEM Format for Use with curl,above>> for instructions on creating this file):
+The curl commands in the following sections will not work with the system `curl` on OS X Yosemite (10.10).
+Instead, the certificate supplied with the `-E` parameter must be in PKCS12 format, and the file supplied with the `--cacert` parameter must contain only the CA certificate, and no key (see <<Convert the Certificate and Key to PEM Format for Use with curl,above>> for instructions on creating this file):
 
 [source,bash]
 $ curl -E solr-ssl.keystore.p12:secret --cacert solr-ssl.cacert.pem ...
@@ -379,14 +389,15 @@ Use `post.jar` to index some example documents to the SolrCloud collection creat
 
 [source,bash]
 ----
-cd example/exampledocs
+$ cd example/exampledocs
 
-java -Djavax.net.ssl.keyStorePassword=secret -Djavax.net.ssl.keyStore=../../server/etc/solr-ssl.keystore.p12 -Djavax.net.ssl.trustStore=../../server/etc/solr-ssl.keystore.p12 -Djavax.net.ssl.trustStorePassword=secret -Durl=https://localhost:8984/solr/mycollection/update -jar post.jar *.xml
+$ java -Djavax.net.ssl.keyStorePassword=secret -Djavax.net.ssl.keyStore=../../server/etc/solr-ssl.keystore.p12 -Djavax.net.ssl.trustStore=../../server/etc/solr-ssl.keystore.p12 -Djavax.net.ssl.trustStorePassword=secret -Durl=https://localhost:8984/solr/mycollection/update -jar post.jar *.xml
 ----
 
 === Query Using curl
 
-Use curl to query the SolrCloud collection created above, from a directory containing the PEM formatted certificate and key created above (e.g., `example/etc/`) - if you have not enabled client authentication (system property `-Djetty.ssl.clientAuth=true)`, then you can remove the `-E solr-ssl.pem:secret` option:
+Use curl to query the SolrCloud collection created above, from a directory containing the PEM formatted certificate and key created above (e.g., `example/etc/`).
+If you have not enabled client authentication (system property `-Djetty.ssl.clientAuth=true`), then you can remove the `-E solr-ssl.pem:secret` option:
 
 [source,bash]
 ----
@@ -395,7 +406,8 @@ curl -E solr-ssl.pem:secret --cacert solr-ssl.pem "https://localhost:8984/solr/m
 
 === Index a Document using CloudSolrClient
 
-From a java client using SolrJ, index a document. In the code below, the `javax.net.ssl.*` system properties are set programmatically, but you could instead specify them on the java command line, as in the `post.jar` example above:
+From a Java client using SolrJ, index a document.
+In the code below, the `javax.net.ssl.*` system properties are set programmatically, but you could instead specify them on the java command line, as in the `post.jar` example above:
 
 [source,java]
 ----
diff --git a/solr/solr-ref-guide/src/enhancing-queries.adoc b/solr/solr-ref-guide/src/enhancing-queries.adoc
new file mode 100644
index 0000000..7d34db1
--- /dev/null
+++ b/solr/solr-ref-guide/src/enhancing-queries.adoc
@@ -0,0 +1,58 @@
+= Enhancing Queries
+:page-children: spell-checking, \
+    suggester, \
+    morelikethis, \
+    query-re-ranking, \
+    learning-to-rank, \
+    tagger-handler, \
+    analytics, \
+    terms-component, \
+    term-vector-component, \
+    stats-component
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+[.lead]
+Solr provides many options for assisting users with their queries.
+
+These options allow you to show users alternate spellings for their search terms, or provide suggestions for terms while they type.
+
+Re-ranking provides an ability to show documents in an order based on a query that may be more complex than the user's query.
+This forms the basis of Solr's Learning to Rank functionality, which can re-rank documents based on a machine-learned model.
+
+The Tagger request handler provides basic named entity recognition functionality.
+
+Finally, if you want to understand the terms in your index or get statistics from terms in the index, those options are covered here also.
+
+****
+// This tags the below list so it can be used in the parent page section list
+// tag::queries-sections[]
+[cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| <<spell-checking.adoc#,Spell Checking>>: Check user spelling of query terms.
+| <<suggester.adoc#,Suggester>>: Suggest query terms while the user types.
+| <<morelikethis.adoc#,MoreLikeThis>>: Get results similar to result documents.
+| <<query-re-ranking.adoc#,Query Re-Ranking>>: Re-rank top documents.
+| <<learning-to-rank.adoc#,Learning to Rank>>: Use machine learned ranking models.
+| <<tagger-handler.adoc#,Tagger Handler>>: Basic named entity tagging in text.
+| <<analytics.adoc#,Analytics Component>>: Compute complex analytics over a result set.
+| <<terms-component.adoc#,Terms Component>>: Access indexed terms and the documents that include them.
+| <<term-vector-component.adoc#,Term Vector Component>>: Term information about specific documents.
+| <<stats-component.adoc#,Stats Component>>: Get information from numeric fields within a document set.
+|===
+// end::queries-sections[]
+****
diff --git a/solr/solr-ref-guide/src/working-with-enum-fields.adoc b/solr/solr-ref-guide/src/enum-fields.adoc
similarity index 85%
rename from solr/solr-ref-guide/src/working-with-enum-fields.adoc
rename to solr/solr-ref-guide/src/enum-fields.adoc
index 8df2552..9e914b3 100644
--- a/solr/solr-ref-guide/src/working-with-enum-fields.adoc
+++ b/solr/solr-ref-guide/src/enum-fields.adoc
@@ -1,4 +1,4 @@
-= Working with Enum Fields
+= Enum Fields
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -16,7 +16,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-EnumFieldType allows defining a field whose values are a closed set, and the sort order is pre-determined but is not alphabetic nor numeric. Examples of this are severity lists, or risk definitions.
+The EnumFieldType allows defining a field whose values are a closed set, and whose sort order is pre-determined but neither alphabetic nor numeric.
+Examples of this are severity lists or risk definitions.
 
 .EnumField has been Deprecated
 [WARNING]
@@ -36,7 +37,8 @@ The EnumFieldType type definition is quite simple, as in this example defining f
 
 Besides the `name` and the `class`, which are common to all field types, this type also takes two additional parameters:
 
-`enumsConfig`:: the name of a configuration file that contains the `<enum/>` list of field values and their order that you wish to use with this field type. If a path to the file is not defined specified, the file should be in the `conf` directory for the collection.
+`enumsConfig`:: the name of a configuration file that contains the `<enum/>` list of field values and their order that you wish to use with this field type.
+If a path to the file is not specified, the file should be in the `conf` directory for the collection.
 `enumName`:: the name of the specific enumeration in the `enumsConfig` file to use for this type.
 
 Note that `docValues="true"` must be specified either in the EnumFieldType fieldType or field specification.
@@ -45,7 +47,8 @@ Note that `docValues="true"` must be specified either in the EnumFieldType field
 
 The file named with the `enumsConfig` parameter can contain multiple enumeration value lists with different names if there are multiple uses for enumerations in your Solr schema.
 
-In this example, there are two value lists defined. Each list is between `enum` opening and closing tags:
+In this example, there are two value lists defined.
+Each list is between `enum` opening and closing tags:
 
 [source,xml]
 ----
diff --git a/solr/solr-ref-guide/src/errata.adoc b/solr/solr-ref-guide/src/errata.adoc
deleted file mode 100644
index 3076af3..0000000
--- a/solr/solr-ref-guide/src/errata.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-= Errata
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-== Errata For This Documentation
-
-Any mistakes found in this documentation after its release will be listed below.
-
-=== Errata For Past Versions of This Documentation
-
-Any known mistakes in past releases of this documentation will be noted below.
diff --git a/solr/solr-ref-guide/src/exporting-result-sets.adoc b/solr/solr-ref-guide/src/exporting-result-sets.adoc
index a4da823..3c5e3a7 100644
--- a/solr/solr-ref-guide/src/exporting-result-sets.adoc
+++ b/solr/solr-ref-guide/src/exporting-result-sets.adoc
@@ -16,20 +16,21 @@
 // specific language governing permissions and limitations
 // under the License.
 
-
-It's possible to export fully sorted result sets using a special <<query-re-ranking.adoc#,rank query parser>> and <<response-writers.adoc#,response writer>> specifically designed to work together to handle scenarios that involve sorting and exporting millions of records.
+The `/export` request handler allows a fully sorted result set to be streamed out of Solr using a special <<query-re-ranking.adoc#,rank query parser>> and <<response-writers.adoc#,response writer>>.
+These have been specifically designed to work together to handle scenarios that involve sorting and exporting millions of records.
 
 This feature uses a stream sorting technique that begins to send records within milliseconds and continues to stream results until the entire result set has been sorted and exported.
 
-The cases where this functionality may be useful include: session analysis, distributed merge joins, time series roll-ups, aggregations on high cardinality fields, fully distributed field collapsing, and sort based stats.
+The cases where this functionality may be useful include: session analysis, distributed merge joins, time series roll-ups, aggregations on high cardinality fields, fully distributed field collapsing, and sort-based stats.
 
 == Field Requirements
 
-All the fields being sorted and exported must have docValues set to true. For more information, see the section on <<docvalues.adoc#,DocValues>>.
+All the fields being sorted and exported must have docValues set to `true`.
+For more information, see the section on <<docvalues.adoc#,DocValues>>.
 
 == The /export RequestHandler
 
-The `/export` request handler with the appropriate configuration is one of Solr's out-of-the-box request handlers - see <<implicit-requesthandlers.adoc#,Implicit RequestHandlers>> for more information.
+The `/export` request handler with the appropriate configuration is one of Solr's out-of-the-box request handlers - see <<implicit-requesthandlers.adoc#,Implicit Request Handlers>> for more information.
 
 Note that this request handler's properties are defined as "invariants", which means they cannot be overridden by other properties passed at another time (such as at query time).
 
@@ -37,11 +38,15 @@ Note that this request handler's properties are defined as "invariants", which m
 
 You can use `/export` to make requests to export the result set of a query.
 
-All queries must include `sort` and `fl` parameters, or the query will return an error. Filter queries are also supported.
+All queries must include `sort` and `fl` parameters, or the query will return an error.
+Filter queries are also supported.
 
-An optional parameter `batchSize` determines the size of the internal buffers for partial results. The default value is `30000` but users may want to specify smaller values to limit the memory use (at the cost of degraded performance) or higher values to improve export performance (the relationship is not linear and larger values don't bring proportionally larger performance increases).
+An optional parameter `batchSize` determines the size of the internal buffers for partial results.
+The default value is `30000`, but users may want to specify smaller values to limit memory use (at the cost of degraded performance) or higher values to improve export performance (the relationship is not linear and larger values don't bring proportionally larger performance increases).
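+
+For illustration only (the collection name, query, and field names below are placeholders), a request that lowers the buffer size might look like:
+
+[source,text]
+----
+http://localhost:8983/solr/core_name/export?q=*:*&sort=id+asc&fl=id&batchSize=10000
+----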
 
-The supported response writers are `json` and `javabin`. For backward compatibility reasons `wt=xsort` is also supported as input, but `wt=xsort` behaves same as `wt=json`. The default output format is `json`.
+The supported response writers are `json` and `javabin`.
+For backward compatibility reasons `wt=xsort` is also supported as input, but `wt=xsort` behaves the same as `wt=json`.
+The default output format is `json`.
 
 Here is an example of an export request of some indexed log data:
 
@@ -52,20 +57,28 @@ http://localhost:8983/solr/core_name/export?q=my-query&sort=severity+desc,timest
 
 === Specifying the Sort Criteria
 
-The `sort` property defines how documents will be sorted in the exported result set. Results can be sorted by any field that has a field type of int,long, float, double, string. The sort fields must be single valued fields.
+The `sort` property defines how documents will be sorted in the exported result set.
+Results can be sorted by any field that has a field type of int, long, float, double, or string.
+The sort fields must be single-valued fields.
 
-The export performance will get slower as you add more sort fields. If there is enough physical memory available outside of the JVM to load up the sort fields then the performance will be linearly slower with addition of sort fields.
+The export performance will get slower as you add more sort fields.
+If there is enough physical memory available outside of the JVM to load up the sort fields, then performance will be linearly slower with the addition of sort fields.
 It can get worse otherwise.
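+
+As a hypothetical illustration (the field names are placeholders), a two-field sort might be specified as:
+
+[source,text]
+----
+sort=severity+desc,timestamp+desc
+----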
 
 === Specifying the Field List
 
-The `fl` property defines the fields that will be exported with the result set. Any of the field types that can be sorted (i.e., int, long, float, double, string, date, boolean) can be used in the field list. The fields can be single or multi-valued. However, returning scores and wildcards are not supported at this time.
+The `fl` property defines the fields that will be exported with the result set.
+Any of the field types that can be sorted (i.e., int, long, float, double, string, date, boolean) can be used in the field list.
+The fields can be single or multi-valued.
+However, returning scores and using wildcards are not supported at this time.
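+
+For example (the field names here are placeholders), a field list of single- and multi-valued docValues fields might be:
+
+[source,text]
+----
+fl=id,severity,timestamp,msg
+----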
 
 === Specifying the Local Streaming Expression
 
 The optional `expr` property defines a <<streaming-expressions.adoc#,stream expression>> that allows documents to be processed locally before they are exported in the result set.
 
-Expressions have to use a special `input()` stream that represents original results from the `/export` handler. Output from the stream expression then becomes the output from the `/export` handler. The `&streamLocalOnly=true` flag is always set for this streaming expression.
+Expressions have to use a special `input()` stream that represents original results from the `/export` handler.
+Output from the stream expression then becomes the output from the `/export` handler.
+The `&streamLocalOnly=true` flag is always set for this streaming expression.
 
 Only stream <<stream-decorator-reference.adoc#,decorators>> and <<stream-evaluator-reference.adoc#,evaluators>> are supported in these expressions - using any of the <<stream-source-reference.adoc#,source>> expressions except for the pre-defined `input()` will result in an error.
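+
+As an illustration (the collection, query, and field names are placeholders, and `top` is just one possible decorator), an export request with a local expression might look like:
+
+[source,text]
+----
+http://localhost:8983/solr/core_name/export?q=my-query&sort=severity+desc&fl=severity,timestamp,msg&expr=top(n=2,input(),sort="severity desc")
+----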
 
diff --git a/solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc b/solr/solr-ref-guide/src/external-files-processes.adoc
similarity index 72%
rename from solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc
rename to solr/solr-ref-guide/src/external-files-processes.adoc
index 49dacce..7f44ee4 100644
--- a/solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc
+++ b/solr/solr-ref-guide/src/external-files-processes.adoc
@@ -1,4 +1,4 @@
-= Working with External Files and Processes
+= External Files and Processes
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -16,16 +16,27 @@
 // specific language governing permissions and limitations
 // under the License.
 
+Solr supports storing field values in an external file with a field type called `ExternalFileField`.
+It can also consume a stream of tokens that have already undergone analysis with a field type called `PreAnalyzedField`.
+
 == The ExternalFileField Type
 
-The `ExternalFileField` type makes it possible to specify the values for a field in a file outside the Solr index. For such a field, the file contains mappings from a key field to the field value. Another way to think of this is that, instead of specifying the field in documents as they are indexed, Solr finds values for this field in the external file.
+The `ExternalFileField` type makes it possible to specify the values for a field in a file outside the Solr index.
+For such a field, the file contains mappings from a key field to the field value.
+Another way to think of this is that, instead of specifying the field in documents as they are indexed, Solr finds values for this field in the external file.
 
 [IMPORTANT]
 ====
-External fields are not searchable. They can be used only for function queries or display. For more information on function queries, see the section on <<function-queries.adoc#,Function Queries>>.
+External fields are not searchable.
+They can be used only for function queries or display.
+For more information on function queries, see the section on <<function-queries.adoc#,Function Queries>>.
 ====
 
-The `ExternalFileField` type is handy for cases where you want to update a particular field in many documents more often than you want to update the rest of the documents. For example, suppose you have implemented a document rank based on the number of views. You might want to update the rank of all the documents daily or hourly, while the rest of the contents of the documents might be updated much less frequently. Without `ExternalFileField`, you would need to update each document just  [...]
+The `ExternalFileField` type is handy for cases where you want to update a particular field in many documents more often than you want to update the rest of the documents.
+For example, suppose you have implemented a document rank based on the number of views.
+You might want to update the rank of all the documents daily or hourly, while the rest of the contents of the documents might be updated much less frequently.
+Without `ExternalFileField`, you would need to update each document just to change the rank.
+Using `ExternalFileField` is much more efficient because all document values for a particular field are stored in an external file that can be updated as frequently as you wish.
 
 In `schema.xml`, the definition of this field type might look like this:
 
@@ -34,18 +45,23 @@ In `schema.xml`, the definition of this field type might look like this:
 <fieldType name="entryRankFile" keyField="pkId" defVal="0" stored="false" indexed="false" class="solr.ExternalFileField"/>
 ----
 
-The `keyField` attribute defines the key that will be defined in the external file. It is usually the unique key for the index, but it doesn't need to be as long as the `keyField` can be used to identify documents in the index. A `defVal` defines a default value that will be used if there is no entry in the external file for a particular document.
+The `keyField` attribute defines the key that will be used in the external file.
+It is usually the unique key for the index, but it doesn't need to be, as long as the `keyField` can be used to identify documents in the index.
+A `defVal` defines a default value that will be used if there is no entry in the external file for a particular document.
 
 === Format of the External File
 
-The file itself is located in Solr's index directory, which by default is `$SOLR_HOME/data`. The name of the file should be `external_fieldname_` or `external_fieldname_.*`. For the example above, then, the file could be named `external_entryRankFile` or `external_entryRankFile.txt`.
+The file itself is located in Solr's index directory, which by default is `$SOLR_HOME/data`.
+The name of the file should be `external_fieldname_` or `external_fieldname_.*`.
+For the example above, then, the file could be named `external_entryRankFile` or `external_entryRankFile.txt`.
 
 [TIP]
 ====
-If any files using the name pattern `.*` (such as `.txt`) appear, the last (after being sorted by name) will be used and previous versions will be deleted. This behavior supports implementations on systems where one may not be able to overwrite a file (for example, on Windows, if the file is in use).
+If any files using the name pattern `.*` (such as `.txt`) appear, the last (after being sorted by name) will be used and previous versions will be deleted.
+This behavior supports implementations on systems where one may not be able to overwrite a file (for example, on Windows, if the file is in use).
 ====
 
-The file contains entries that map a key field, on the left of the equals sign, to a value, on the right. Here are a few example entries:
+The file contains entries that map a key field, on the left of the equals sign, to a value, on the right.
+Here are a few example entries:
 
 [source,text]
 ----
@@ -54,11 +70,13 @@ doc34=3.14159
 doc40=42
 ----
 
-The keys listed in this file do not need to be unique. The file does not need to be sorted, but Solr will be able to perform the lookup faster if it is.
+The keys listed in this file do not need to be unique.
+The file does not need to be sorted, but Solr will be able to perform the lookup faster if it is.
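+
+One common way to use such a field is through the `field()` function in a function query.
+As a hypothetical illustration (assuming a field named `entryRankFile` using the type defined above, and the `edismax` query parser), a rank-based boost might look like:
+
+[source,text]
+----
+q=ipod&defType=edismax&bf=field(entryRankFile)
+----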
 
 === Reloading an External File
 
-It's possible to define an event listener to reload an external file when either a searcher is reloaded or when a new searcher is started. See the section <<query-settings-in-solrconfig.adoc#query-related-listeners,Query-Related Listeners>> for more information, but a sample definition in `solrconfig.xml` might look like this:
+It's possible to define an event listener to reload an external file when either a searcher is reloaded or when a new searcher is started.
+See the section <<caches-warming.adoc#query-related-listeners,Query-Related Listeners>> for more information, but a sample definition in `solrconfig.xml` might look like this:
 
 [source,xml]
 ----
@@ -68,16 +86,23 @@ It's possible to define an event listener to reload an external file when either
 
 == The PreAnalyzedField Type
 
-The `PreAnalyzedField` type provides a way to send to Solr serialized token streams, optionally with independent stored values of a field, and have this information stored and indexed without any additional text processing applied in Solr. This is useful if user wants to submit field content that was already processed by some existing external text processing pipeline (e.g., it has been tokenized, annotated, stemmed, synonyms inserted, etc.), while using all the rich attributes that Luce [...]
+The `PreAnalyzedField` type provides a way to send to Solr serialized token streams, optionally with independent stored values of a field, and have this information stored and indexed without any additional text processing applied in Solr.
+This is useful if a user wants to submit field content that was already processed by some existing external text processing pipeline (e.g., it has been tokenized, annotated, stemmed, synonyms inserted, etc.), while using all the rich attributes that Lucene's TokenStream provides (per-token attributes).
 
-The serialization format is pluggable using implementations of PreAnalyzedParser interface. There are two out-of-the-box implementations:
+The serialization format is pluggable using implementations of the PreAnalyzedParser interface.
+There are two out-of-the-box implementations:
 
-* <<JsonPreAnalyzedParser>>: as the name suggests, it parses content that uses JSON to represent field's content. This is the default parser to use if the field type is not configured otherwise.
+* <<JsonPreAnalyzedParser>>: as the name suggests, it parses content that uses JSON to represent the field's content.
+This is the default parser to use if the field type is not configured otherwise.
 * <<SimplePreAnalyzedParser>>: uses a simple strict plain text format, which in some situations may be easier to create than JSON.
 
-There is only one configuration parameter, `parserImpl`. The value of this parameter should be a fully qualified class name of a class that implements PreAnalyzedParser interface. The default value of this parameter is `org.apache.solr.schema.JsonPreAnalyzedParser`.
+There is only one configuration parameter, `parserImpl`.
+The value of this parameter should be the fully qualified class name of a class that implements the PreAnalyzedParser interface.
+The default value of this parameter is `org.apache.solr.schema.JsonPreAnalyzedParser`.
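+
+A minimal sketch of a field type that sets this parameter explicitly (the type name is illustrative, and the value shown is simply the default):
+
+[source,xml]
+----
+<fieldType name="preanalyzed" class="solr.PreAnalyzedField"
+           parserImpl="org.apache.solr.schema.JsonPreAnalyzedParser"/>
+----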
 
-By default, the query-time analyzer for fields of this type will be the same as the index-time analyzer, which expects serialized pre-analyzed text. You must add a query type analyzer to your fieldType in order to perform analysis on non-pre-analyzed queries. In the example below, the index-time analyzer expects the default JSON serialization format, and the query-time analyzer will employ StandardTokenizer/LowerCaseFilter:
+By default, the query-time analyzer for fields of this type will be the same as the index-time analyzer, which expects serialized pre-analyzed text.
+You must add a query-time analyzer to your fieldType in order to perform analysis on non-pre-analyzed queries.
+In the example below, the index-time analyzer expects the default JSON serialization format, and the query-time analyzer will employ StandardTokenizer/LowerCaseFilter:
 
 [source,xml]
 ----
@@ -91,11 +116,10 @@ By default, the query-time analyzer for fields of this type will be the same as
 
 === JsonPreAnalyzedParser
 
-This is the default serialization format used by PreAnalyzedField type. It uses a top-level JSON map with the following keys:
-
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
+This is the default serialization format used by the PreAnalyzedField type.
+It uses a top-level JSON map with the following keys:
 
-[cols="20,60,20",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Key |Description |Required
 |`v` |Version key. Currently the supported version is `1`. |required
@@ -108,11 +132,10 @@ Any other top-level key is silently ignored.
 
 ==== Token Stream Serialization
 
-The token stream is expressed as a JSON list of JSON maps. The map for each token consists of the following keys and values:
+The token stream is expressed as a JSON list of JSON maps.
+The map for each token consists of the following keys and values:
 
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
-
-[cols="10,20,20,30,20",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Key |Description |Lucene Attribute |Value |Required?
 |`t` |token |{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/CharTermAttribute.html[CharTermAttribute] |UTF-8 string representing the current token |required
@@ -183,9 +206,7 @@ Please note that Unicode sequences (e.g., `\u0001`) are not supported.
 
 The following token attributes are supported, and identified with short symbolic names:
 
-// TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
-
-[cols="10,30,30,30",options="header"]
+[%autowidth.stretch,options="header"]
 |===
 |Name |Description |Lucene attribute |Value format
 |`i` |position increment |{lucene-javadocs}/core/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.html[PositionIncrementAttribute] |integer
diff --git a/solr/solr-ref-guide/src/faceting.adoc b/solr/solr-ref-guide/src/faceting.adoc
index 258b100..81c7894 100644
--- a/solr/solr-ref-guide/src/faceting.adoc
+++ b/solr/solr-ref-guide/src/faceting.adoc
... 25985 lines suppressed ...