Posted to commits@solr.apache.org by ct...@apache.org on 2021/07/01 16:12:33 UTC

[solr] branch main updated: SOLR-14444: Sentences start on newlines (#199)

This is an automated email from the ASF dual-hosted git repository.

ctargett pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/main by this push:
     new 97efead  SOLR-14444: Sentences start on newlines (#199)
97efead is described below

commit 97efeadd02a4720f98a40af0546eb29eb7a82827
Author: Cassandra Targett <ct...@apache.org>
AuthorDate: Thu Jul 1 11:12:25 2021 -0500

    SOLR-14444: Sentences start on newlines (#199)
---
 solr/solr-ref-guide/src/about-this-guide.adoc      |  38 ++-
 solr/solr-ref-guide/src/aliases.adoc               |   4 +-
 .../src/analytics-expression-sources.adoc          |   9 +-
 .../src/analytics-mapping-functions.adoc           |   6 +-
 .../src/analytics-reduction-functions.adoc         |  18 +-
 solr/solr-ref-guide/src/analytics.adoc             |  63 ++--
 solr/solr-ref-guide/src/analyzers.adoc             |   3 +-
 solr/solr-ref-guide/src/audit-logging.adoc         |   6 +-
 .../authentication-and-authorization-plugins.adoc  |   3 +-
 solr/solr-ref-guide/src/backup-restore.adoc        |   9 +-
 .../src/block-join-query-parser.adoc               |   3 +-
 solr/solr-ref-guide/src/circuit-breakers.adoc      |  65 ++--
 .../src/cluster-node-management.adoc               |   6 +-
 solr/solr-ref-guide/src/cluster-plugins.adoc       | 125 +++----
 solr/solr-ref-guide/src/codec-factory.adoc         |   5 +-
 .../src/collapse-and-expand-results.adoc           |  70 ++--
 solr/solr-ref-guide/src/collection-management.adoc |  12 +-
 solr/solr-ref-guide/src/collections-api.adoc       |  17 +-
 .../src/commits-transaction-logs.adoc              |   5 +-
 .../src/common-query-parameters.adoc               |   9 +-
 .../solr-ref-guide/src/computational-geometry.adoc |  68 ++--
 solr/solr-ref-guide/src/config-api.adoc            |   3 +-
 solr/solr-ref-guide/src/configsets-api.adoc        |   3 +-
 solr/solr-ref-guide/src/configuring-solr-xml.adoc  |   3 +-
 solr/solr-ref-guide/src/copy-fields.adoc           |   3 +-
 solr/solr-ref-guide/src/coreadmin-api.adoc         |   3 +-
 solr/solr-ref-guide/src/curve-fitting.adoc         | 105 +++---
 solr/solr-ref-guide/src/date-formatting-math.adoc  |  12 +-
 solr/solr-ref-guide/src/de-duplication.adoc        |   3 +-
 solr/solr-ref-guide/src/dismax-query-parser.adoc   |  77 +++--
 solr/solr-ref-guide/src/docker-faq.adoc            |  65 ++--
 solr/solr-ref-guide/src/docker-networking.adoc     |  10 +-
 solr/solr-ref-guide/src/document-analysis.adoc     |   8 +-
 solr/solr-ref-guide/src/document-transformers.adoc |  56 ++-
 .../src/documents-fields-schema-design.adoc        |   5 +-
 solr/solr-ref-guide/src/documents-screen.adoc      |  29 +-
 solr/solr-ref-guide/src/docvalues.adoc             |  47 ++-
 solr/solr-ref-guide/src/dsp.adoc                   | 231 ++++++-------
 solr/solr-ref-guide/src/edismax-query-parser.adoc  |   9 +-
 .../src/external-files-processes.adoc              |   6 +-
 solr/solr-ref-guide/src/faceting.adoc              | 207 +++++++----
 .../src/field-properties-by-use-case.adoc          |  15 +-
 .../src/field-type-definitions-and-properties.adoc |  72 ++--
 solr/solr-ref-guide/src/filters.adoc               | 380 +++++++++++++++------
 solr/solr-ref-guide/src/function-queries.adoc      | 160 ++++++---
 solr/solr-ref-guide/src/graph-traversal.adoc       | 238 +++++++++----
 solr/solr-ref-guide/src/graph.adoc                 | 186 ++++------
 .../src/hadoop-authentication-plugin.adoc          |   3 +-
 solr/solr-ref-guide/src/highlighting.adoc          | 215 ++++++++----
 .../src/implicit-requesthandlers.adoc              |   6 +-
 solr/solr-ref-guide/src/index-location-format.adoc |  14 +-
 .../solr-ref-guide/src/index-segments-merging.adoc |   8 +-
 solr/solr-ref-guide/src/index.adoc                 |   3 +-
 .../src/indexing-data-operations.adoc              |   6 +-
 .../src/indexing-nested-documents.adoc             |  12 +-
 solr/solr-ref-guide/src/indexing-with-tika.adoc    | 119 +++++--
 .../src/indexing-with-update-handlers.adoc         | 126 ++++---
 solr/solr-ref-guide/src/indexupgrader-tool.adoc    |  13 +-
 solr/solr-ref-guide/src/initparams.adoc            |   3 +-
 solr/solr-ref-guide/src/installing-solr.adoc       |  73 ++--
 solr/solr-ref-guide/src/jmx-with-solr.adoc         |  15 +-
 solr/solr-ref-guide/src/join-query-parser.adoc     |  43 ++-
 solr/solr-ref-guide/src/json-facet-api.adoc        |  78 +++--
 .../src/json-faceting-domain-changes.adoc          |  35 +-
 solr/solr-ref-guide/src/json-query-dsl.adoc        |  37 +-
 solr/solr-ref-guide/src/jvm-settings.adoc          |  40 ++-
 .../src/jwt-authentication-plugin.adoc             |  17 +-
 .../src/kerberos-authentication-plugin.adoc        |   9 +-
 solr/solr-ref-guide/src/language-analysis.adoc     | 114 ++++---
 solr/solr-ref-guide/src/language-detection.adoc    |  12 +-
 solr/solr-ref-guide/src/learning-to-rank.adoc      |  42 ++-
 solr/solr-ref-guide/src/loading.adoc               |  92 +++--
 solr/solr-ref-guide/src/logs.adoc                  |  99 ++----
 solr/solr-ref-guide/src/luke-request-handler.adoc  |  18 +-
 solr/solr-ref-guide/src/machine-learning.adoc      | 329 +++++++-----------
 .../src/major-changes-in-solr-8.adoc               |   3 +-
 solr/solr-ref-guide/src/managed-resources.adoc     |  12 +-
 solr/solr-ref-guide/src/metrics-reporting.adoc     |  21 +-
 solr/solr-ref-guide/src/numerical-analysis.adoc    |  16 +-
 .../src/package-manager-internals.adoc             |  53 ++-
 solr/solr-ref-guide/src/package-manager.adoc       |  26 +-
 solr/solr-ref-guide/src/pagination-of-results.adoc |   3 +-
 .../src/partial-document-updates.adoc              |  12 +-
 .../src/performance-statistics-reference.adoc      |  37 +-
 solr/solr-ref-guide/src/ping.adoc                  |   6 +-
 solr/solr-ref-guide/src/plugins-stats-screen.adoc  |  10 +-
 solr/solr-ref-guide/src/post-tool.adoc             |  13 +-
 .../src/probability-distributions.adoc             | 168 ++++-----
 solr/solr-ref-guide/src/query-re-ranking.adoc      |   3 +-
 solr/solr-ref-guide/src/query-screen.adoc          |  59 +++-
 .../src/query-syntax-and-parsers.adoc              |   3 +-
 solr/solr-ref-guide/src/rate-limiters.adoc         |  68 ++--
 solr/solr-ref-guide/src/regression.adoc            |  96 +++---
 solr/solr-ref-guide/src/reindexing.adoc            |  60 +++-
 solr/solr-ref-guide/src/relevance.adoc             |  28 +-
 .../src/replica-placement-plugins.adoc             | 148 +++-----
 .../solr-ref-guide/src/request-parameters-api.adoc |   3 +-
 solr/solr-ref-guide/src/response-writers.adoc      |   3 +-
 solr/solr-ref-guide/src/result-clustering.adoc     |  22 +-
 solr/solr-ref-guide/src/result-grouping.adoc       |   3 +-
 solr/solr-ref-guide/src/scalar-math.adoc           |  33 +-
 solr/solr-ref-guide/src/schema-browser-screen.adoc |  22 +-
 solr/solr-ref-guide/src/schemaless-mode.adoc       |  62 +++-
 .../src/script-update-processor.adoc               |  52 ++-
 solr/solr-ref-guide/src/search-sample.adoc         | 133 +++-----
 solr/solr-ref-guide/src/shard-management.adoc      | 111 +++---
 solr/solr-ref-guide/src/simulations.adoc           | 100 ++----
 solr/solr-ref-guide/src/solr-admin-ui.adoc         |   3 +-
 .../src/solr-control-script-reference.adoc         |  21 +-
 solr/solr-ref-guide/src/solr-glossary.adoc         |   4 +-
 solr/solr-ref-guide/src/solr-in-docker.adoc        |  67 ++--
 solr/solr-ref-guide/src/solr-on-hdfs.adoc          |   3 +-
 solr/solr-ref-guide/src/solr-plugins.adoc          |   7 +-
 solr/solr-ref-guide/src/solr-tutorial.adoc         |   3 +-
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc    | 142 +++++---
 .../src/solrcloud-distributed-requests.adoc        |  22 +-
 .../solrcloud-recoveries-and-write-tolerance.adoc  |  32 +-
 .../src/solrcloud-shards-indexing.adoc             | 115 +++++--
 .../solrcloud-with-legacy-configuration-files.adoc |  11 +-
 solr/solr-ref-guide/src/solrj.adoc                 |  18 +-
 solr/solr-ref-guide/src/spatial-search.adoc        |  63 ++--
 solr/solr-ref-guide/src/spell-checking.adoc        | 163 ++++++---
 solr/solr-ref-guide/src/standard-query-parser.adoc | 144 +++++---
 solr/solr-ref-guide/src/statistics.adoc            | 280 ++++++---------
 solr/solr-ref-guide/src/stats-component.adoc       |   6 +-
 .../src/stream-decorator-reference.adoc            |  15 +-
 .../src/stream-evaluator-reference.adoc            |   9 +-
 solr/solr-ref-guide/src/stream-screen.adoc         |  11 +-
 .../src/stream-source-reference.adoc               | 249 ++++++++++----
 solr/solr-ref-guide/src/streaming-expressions.adoc |   3 +-
 solr/solr-ref-guide/src/suggester.adoc             | 217 ++++++++----
 solr/solr-ref-guide/src/system-requirements.adoc   |  11 +-
 solr/solr-ref-guide/src/tagger-handler.adoc        |  36 +-
 .../src/taking-solr-to-production.adoc             |  12 +-
 solr/solr-ref-guide/src/task-management.adoc       |   9 +-
 solr/solr-ref-guide/src/term-vector-component.adoc |  24 +-
 solr/solr-ref-guide/src/term-vectors.adoc          |  38 +--
 solr/solr-ref-guide/src/terms-component.adoc       |  68 ++--
 solr/solr-ref-guide/src/thread-dump.adoc           |   3 +-
 solr/solr-ref-guide/src/time-series.adoc           | 190 ++++-------
 solr/solr-ref-guide/src/tokenizers.adoc            |  67 ++--
 solr/solr-ref-guide/src/transform.adoc             |  78 ++---
 .../src/transforming-and-indexing-custom-json.adoc |  74 ++--
 solr/solr-ref-guide/src/tutorial-aws.adoc          |  21 +-
 solr/solr-ref-guide/src/tutorial-diy.adoc          |   9 +-
 solr/solr-ref-guide/src/tutorial-films.adoc        |  12 +-
 solr/solr-ref-guide/src/tutorial-solrcloud.adoc    |  50 ++-
 solr/solr-ref-guide/src/tutorial-techproducts.adoc |   6 +-
 .../src/update-request-processors.adoc             |  12 +-
 .../src/upgrading-a-solr-cluster.adoc              |   3 +-
 .../src/user-managed-distributed-search.adoc       |  19 +-
 .../src/user-managed-index-replication.adoc        |  16 +-
 solr/solr-ref-guide/src/v2-api.adoc                |   3 +-
 solr/solr-ref-guide/src/variables.adoc             |  44 +--
 .../src/zookeeper-access-control.adoc              |   3 +-
 solr/solr-ref-guide/src/zookeeper-ensemble.adoc    |  10 +-
 .../src/zookeeper-file-management.adoc             |  10 +-
 solr/solr-ref-guide/src/zookeeper-utilities.adoc   |  22 +-
 158 files changed, 4693 insertions(+), 3215 deletions(-)

diff --git a/solr/solr-ref-guide/src/about-this-guide.adoc b/solr/solr-ref-guide/src/about-this-guide.adoc
index 4f6a71a..00d243b 100644
--- a/solr/solr-ref-guide/src/about-this-guide.adoc
+++ b/solr/solr-ref-guide/src/about-this-guide.adoc
@@ -20,15 +20,20 @@ This guide describes all of the important features and functions of Apache Solr.
 
 Solr is free to download from http://solr.apache.org/.
 
-Designed to provide high-level documentation, this guide is intended to be more encyclopedic and less of a cookbook. It is structured to address a broad spectrum of needs, ranging from new developers getting started to well-experienced developers extending their application or troubleshooting. It will be of use at any point in the application life cycle, for whenever you need authoritative information about Solr.
+Designed to provide high-level documentation, this guide is intended to be more encyclopedic and less of a cookbook.
+It is structured to address a broad spectrum of needs, ranging from new developers getting started to well-experienced developers extending their application or troubleshooting.
+It will be of use at any point in the application life cycle, for whenever you need authoritative information about Solr.
 
-The material as presented assumes that you are familiar with some basic search concepts and that you can read XML. It does not assume that you are a Java programmer, although knowledge of Java is helpful when working directly with Lucene or when developing custom extensions to a Lucene/Solr installation.
+The material as presented assumes that you are familiar with some basic search concepts and that you can read XML.
+It does not assume that you are a Java programmer, although knowledge of Java is helpful when working directly with Lucene or when developing custom extensions to a Lucene/Solr installation.
 
 == Hosts and Port Examples
 
-The default port when running Solr is 8983. The samples, URLs and screenshots in this guide may show different ports, because the port number that Solr uses is configurable.
+The default port when running Solr is 8983.
+The samples, URLs and screenshots in this guide may show different ports, because the port number that Solr uses is configurable.
 
-If you have not customized your installation of Solr, please make sure that you use port 8983 when following the examples, or configure your own installation to use the port numbers shown in the examples. For information about configuring port numbers, see the section <<monitoring-solr.adoc#,Monitoring Solr>>.
+If you have not customized your installation of Solr, please make sure that you use port 8983 when following the examples, or configure your own installation to use the port numbers shown in the examples.
+For information about configuring port numbers, see the section <<monitoring-solr.adoc#,Monitoring Solr>>.
 
 Similarly, URL examples use `localhost` throughout; if you are accessing Solr from a location remote to the server hosting Solr, replace `localhost` with the proper domain or IP where Solr is running.
 
@@ -36,7 +41,11 @@ For example, we might provide a sample query like:
 
 `\http://localhost:8983/solr/gettingstarted/select?q=brown+cow`
 
-There are several items in this URL you might need to change locally. First, if your server is running at "www.example.com", you'll replace "localhost" with the proper domain. If you aren't using port 8983, you'll replace that also. Finally, you'll want to replace "gettingstarted" (the collection or core name) with the proper one in use in your implementation. The URL would then become:
+There are several items in this URL you might need to change locally.
+First, if your server is running at "www.example.com", you'll replace "localhost" with the proper domain.
+If you aren't using port 8983, you'll replace that also.
+Finally, you'll want to replace "gettingstarted" (the collection or core name) with the proper one in use in your implementation.
+The URL would then become:
 
 `\http://www.example.com/solr/mycollection/select?q=brown+cow`
 
@@ -44,27 +53,34 @@ There are several items in this URL you might need to change locally. First, if
 
 Path information is given relative to `solr.home`, which is the location under the main Solr installation where Solr's collections and their `conf` and `data` directories are stored.
 
-In many cases, this is is in the `server/solr` directory of your installation. However, there can be exceptions, particularly if your installation has customized this.
+In many cases, this is in the `server/solr` directory of your installation.
+However, there can be exceptions, particularly if your installation has customized this.
 
-In several cases of this Guide, our examples are built from the the "techproducts" example (i.e., you have started Solr with the command `bin/solr -e techproducts`). In this case, `solr.home` will be a sub-directory of the `example/` directory created for you automatically.
+In several cases of this Guide, our examples are built from the "techproducts" example (i.e., you have started Solr with the command `bin/solr -e techproducts`).
+In this case, `solr.home` will be a sub-directory of the `example/` directory created for you automatically.
 
 See also the section <<configuration-files.adoc#solr-home,Solr Home>> for further details on what is contained in this directory.
 
 == API Examples
 
-Solr has two styles of APIs that currently co-exist. The first has grown somewhat organically as Solr has developed over time, but the second, referred to as the "V2 API", redesigns many of the original APIs with a modernized and self-documenting API interface.
+Solr has two styles of APIs that currently co-exist.
+The first has grown somewhat organically as Solr has developed over time, but the second, referred to as the "V2 API", redesigns many of the original APIs with a modernized and self-documenting API interface.
 
-In many cases, but not all, the parameters and outputs of API calls are the same between the two styles. In all cases the paths and endpoints used are different.
+In many cases, but not all, the parameters and outputs of API calls are the same between the two styles.
+In all cases the paths and endpoints used are different.
 
 Throughout this Guide, we have added examples of both styles with sections labeled "V1 API" and "V2 API". As of the 7.2 version of this Guide, these examples are not yet complete - more coverage will be added as future versions of the Guide are released.
 
 The section <<v2-api.adoc#,V2 API>> provides more information about how to work with the new API structure, including how to disable it if you choose to do so.
 
-All APIs return a response header that includes the status of the request and the time to process it. Some APIs will also include the parameters used for the request. Many of the examples in this Guide omit this header information, which you can do locally by adding the parameter `omitHeader=true` to any request.
+All APIs return a response header that includes the status of the request and the time to process it.
+Some APIs will also include the parameters used for the request.
+Many of the examples in this Guide omit this header information, which you can do locally by adding the parameter `omitHeader=true` to any request.
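
For illustration, a minimal sketch of such a request, reusing the `gettingstarted` collection and sample query from the URL examples above (adjust the host, port, and collection to your own installation):

[source,bash]
----
# Same sample query, with the responseHeader block omitted from the output
curl "http://localhost:8983/solr/gettingstarted/select?q=brown+cow&omitHeader=true"
----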
 
 == Special Inline Notes
 
-Special notes are included throughout these pages. There are several types of notes:
+Special notes are included throughout these pages.
+There are several types of notes:
 
 NOTE: Information blocks provide additional information that's useful for you to know.
 
diff --git a/solr/solr-ref-guide/src/aliases.adoc b/solr/solr-ref-guide/src/aliases.adoc
index e30d712..7a2c512 100644
--- a/solr/solr-ref-guide/src/aliases.adoc
+++ b/solr/solr-ref-guide/src/aliases.adoc
@@ -149,8 +149,8 @@ First you create a category routed alias using the <<alias-management.adoc#creat
 Most of the settings are editable at a later time using the <<alias-management.adoc#aliasprop,ALIASPROP>> command.
 
 The alias will be created with a special place-holder collection which will always be named `myAlias\__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA\__TEMP`.
-The first document indexed into the CRA will create a second collection named `myAlias__CRA__foo` (for a routed field value of `foo`). The second document
- indexed will cause the temporary place holder collection to be deleted.
+The first document indexed into the CRA will create a second collection named `myAlias__CRA__foo` (for a routed field value of `foo`).
+The second document indexed will cause the temporary place holder collection to be deleted.
 Thereafter collections will be created whenever a new value for the field is encountered.
 
 CAUTION: To guard against runaway collection creation options for limiting the total number of categories, and for rejecting values that don't match, a regular expression parameter is provided (see <<alias-management.adoc#category-routed-alias-parameters,Category Routed Alias Parameters>> for details).
diff --git a/solr/solr-ref-guide/src/analytics-expression-sources.adoc b/solr/solr-ref-guide/src/analytics-expression-sources.adoc
index 4096afa..8e82344 100644
--- a/solr/solr-ref-guide/src/analytics-expression-sources.adoc
+++ b/solr/solr-ref-guide/src/analytics-expression-sources.adoc
@@ -60,7 +60,8 @@ In order to save duplicates, you must use PointField types.
 
 == Constants
 
-Constants can be included in expressions to use along side fields and functions. The available constants are shown below.
+Constants can be included in expressions to use alongside fields and functions.
+The available constants are shown below.
 Constants do not need to be surrounded by any function to define them, they can be used exactly like fields in an expression.
 
 === Strings
@@ -76,14 +77,16 @@ There are two possible ways of specifying constant strings, as shown below.
 
 === Dates
 
-Dates can be specified in the same way as they are in Solr queries. Just use ISO-8601 format.
+Dates can be specified in the same way as they are in Solr queries.
+Just use ISO-8601 format.
 For more information, refer to the <<date-formatting-math.adoc#,Working with Dates>> section.
 
 * `2017-07-17T19:35:08Z`
 
 === Numeric
 
-Any non-decimal number will be read as an integer, or as a long if it is too large for an integer. All decimal numbers will be read as doubles.
+Any non-decimal number will be read as an integer, or as a long if it is too large for an integer.
+All decimal numbers will be read as doubles.
 
 * `-123421`: Integer
 * `800000000000`: Long
diff --git a/solr/solr-ref-guide/src/analytics-mapping-functions.adoc b/solr/solr-ref-guide/src/analytics-mapping-functions.adoc
index debd9a5..ab4367c 100644
--- a/solr/solr-ref-guide/src/analytics-mapping-functions.adoc
+++ b/solr/solr-ref-guide/src/analytics-mapping-functions.adoc
@@ -181,7 +181,8 @@ Checks whether any value(s) exist for the expression.
 == Comparison
 
 === Equality
-Checks whether two expressions' values are equal. The parameters must be the same type, after implicit casting.
+Checks whether two expressions' values are equal.
+The parameters must be the same type, after implicit casting.
 
 `equal(< _Single_ T >, < _Single_ T >)` => `< _Single Bool_ >`::
     * `equal(F, F)` => `T`
@@ -316,7 +317,8 @@ Explicitly converts the values of a `String` or `Long` expression into `Dates`.
 
 [[analytics-date-math]]
 === Date Math
-Compute the given date math strings for the values of a `Date` expression. The date math strings *must* be <<analytics-expression-sources.adoc#strings, constant>>.
+Compute the given date math strings for the values of a `Date` expression.
+The date math strings *must* be <<analytics-expression-sources.adoc#strings, constant>>.
 
 `date_math(< _Date_ >, < _Constant String_ >...)` => `< _Date_ >`::
     * `date_math(1800-04-15, '+1DAY', '-1MONTH')` => `1800-03-16`
diff --git a/solr/solr-ref-guide/src/analytics-reduction-functions.adoc b/solr/solr-ref-guide/src/analytics-reduction-functions.adoc
index 83c4093..b6f57d8 100644
--- a/solr/solr-ref-guide/src/analytics-reduction-functions.adoc
+++ b/solr/solr-ref-guide/src/analytics-reduction-functions.adoc
@@ -27,7 +27,8 @@ These can be combined using mapping functions to implement more complex function
 == Counting Reductions
 
 === Count
-The number of existing values for an expression. For single-valued expressions, this is equivalent to `docCount`.
+The number of existing values for an expression.
+For single-valued expressions, this is equivalent to `docCount`.
 If no expression is given, the number of matching documents is returned.
 
 `count()` => `< _Single Long_ >`
@@ -35,7 +36,8 @@ If no expression is given, the number of matching documents is returned.
 `count(< T >)` => `< _Single Long_ >`
 
 === Doc Count
-The number of documents for which an expression has existing values. For single-valued expressions, this is equivalent to `count`.
+The number of documents for which an expression has existing values.
+For single-valued expressions, this is equivalent to `count`.
 If no expression is given, the number of matching documents is returned.
 
 `doc_count()` => `< _Single Long_ >`
@@ -49,7 +51,8 @@ The number of documents for which an expression has no existing value.
 
 [[analytics-unique]]
 === Unique
-The number of unique values for an expression. This function accepts `Numeric`, `Date` and `String` expressions.
+The number of unique values for an expression.
+This function accepts `Numeric`, `Date` and `String` expressions.
 
 `unique(< T >)` => `< _Single Long_ >`
 
@@ -85,17 +88,20 @@ NOTE: The expressions must satisfy the rules for `mult` function parameters.
 == Ordering Reductions
 
 === Minimum
-Returns the minimum value for the expression. This function accepts `Numeric`, `Date` and `String` expressions.
+Returns the minimum value for the expression.
+This function accepts `Numeric`, `Date` and `String` expressions.
 
 `min(< T >)` => `< _Single_ T >`
 
 === Maximum
-Returns the maximum value for the expression. This function accepts `Numeric`, `Date` and `String` expressions.
+Returns the maximum value for the expression.
+This function accepts `Numeric`, `Date` and `String` expressions.
 
 `max(< T >)` => `< _Single_ T >`
 
 === Median
-Returns the median of all values for the expression. This function accepts `Numeric` and `Date` expressions.
+Returns the median of all values for the expression.
+This function accepts `Numeric` and `Date` expressions.
 
 `median(< T >)` => `< _Single_ T >`
 
diff --git a/solr/solr-ref-guide/src/analytics.adoc b/solr/solr-ref-guide/src/analytics.adoc
index 6bcbe96..bfae49d 100644
--- a/solr/solr-ref-guide/src/analytics.adoc
+++ b/solr/solr-ref-guide/src/analytics.adoc
@@ -32,9 +32,12 @@ Since the Analytics framework is a _search component_, it must be declared as su
 For distributed analytics requests over cloud collections, the component uses the `AnalyticsHandler` strictly for inter-shard communication.
 The Analytics Handler should not be used by users to submit analytics requests.
 
-To use the Analytics Component, the first step is to install this contrib module's plugins into Solr -- see the <<solr-plugins.adoc#installing-plugins,Solr Plugins>> section on how to do this. Note: Method with `<lib/>` directive doesn't work. Instead copy `${solr.install.dir}/dist/solr-analytics-x.x.x.jar` to `${solr.install.dir}/server/solr-webapp/webapp/WEB-INF/lib/`, as described in the <<libs.adoc#lib-directories,lib directories documentation>>.
+To use the Analytics Component, the first step is to install this contrib module's plugins into Solr -- see the <<solr-plugins.adoc#installing-plugins,Solr Plugins>> section on how to do this.
+Note: Method with `<lib/>` directive doesn't work.
+Instead copy `${solr.install.dir}/dist/solr-analytics-x.x.x.jar` to `${solr.install.dir}/server/solr-webapp/webapp/WEB-INF/lib/`, as described in the <<libs.adoc#lib-directories,lib directories documentation>>.
 
-Next you need to register the request handler and search component. Add the following lines to `solrconfig.xml`, near the defintions for other request handlers:
+Next you need to register the request handler and search component.
+Add the following lines to `solrconfig.xml`, near the definitions for other request handlers:
 
 [source,xml]
 .solrconfig.xml
@@ -75,11 +78,14 @@ curl --data-binary 'analytics={
 There are 3 main parts of any analytics request:
 
 Expressions::
-A list of calculations to perform over the entire result set. Expressions aggregate the search results into a single value to return.
-This list is entirely independent of the expressions defined in each of the groupings. Find out more about them in the section <<Expressions>>.
+A list of calculations to perform over the entire result set.
+Expressions aggregate the search results into a single value to return.
+This list is entirely independent of the expressions defined in each of the groupings.
+Find out more about them in the section <<Expressions>>.
 
 Functions::
-One or more <<variable-functions, Variable Functions>> to be used throughout the rest of the request. These are essentially lambda functions and can be combined in a number of ways.
+One or more <<variable-functions, Variable Functions>> to be used throughout the rest of the request.
+These are essentially lambda functions and can be combined in a number of ways.
 These functions can be used in the expressions defined in `expressions` as well as `groupings`.
 
 Groupings::
@@ -144,13 +150,15 @@ The `functions` parameter is always optional.
 
 == Expressions
 
-Expressions are the way to request pieces of information from the analytics component. These are the statistical expressions that you want computed and returned in your response.
+Expressions are the way to request pieces of information from the analytics component.
+These are the statistical expressions that you want computed and returned in your response.
 
 === Constructing an Expression
 
 ==== Expression Components
 
-An expression is built using fields, constants, mapping functions and reduction functions. The ways that these can be defined are described below.
+An expression is built using fields, constants, mapping functions and reduction functions.
+The ways that these can be defined are described below.
 
 Sources::
 * Constants: The values defined in the expression.
@@ -203,7 +211,8 @@ With the above definitions and ordering, an example expression can be broken up
 [source,bash]
 div(sum(a,fill_missing(b,0)),add(10.5,count(mult(a,c)))))
 
-As a whole, this is a reduced mapping function. The `div` function is a reduced mapping function since it is a <<analytics-mapping-functions.adoc#division,provided mapping function>> and has reduced arguments.
+As a whole, this is a reduced mapping function.
+The `div` function is a reduced mapping function since it is a <<analytics-mapping-functions.adoc#division,provided mapping function>> and has reduced arguments.
 
 If we break down the expression further:
 
@@ -227,10 +236,12 @@ If we break down the expression further:
 
 === Expression Cardinality (Multi-Valued and Single-Valued)
 
-The root of all multi-valued expressions are multi-valued fields. Single-valued expressions can be started with constants or single-valued fields.
+The root of all multi-valued expressions are multi-valued fields.
+Single-valued expressions can be started with constants or single-valued fields.
 All single-valued expressions can be treated as multi-valued expressions that contain one value.
 
-Single-valued expressions and multi-valued expressions can be used together in many mapping functions, as well as multi-valued expressions being used alone, and many single-valued expressions being used together. For example:
+Single-valued expressions and multi-valued expressions can be used together in many mapping functions, as well as multi-valued expressions being used alone, and many single-valued expressions being used together.
+For example:
 
 `add(<single-valued double>, <single-valued double>, ...)`::
 Returns a single-valued double expression where the values of each expression are added.
@@ -265,7 +276,9 @@ An implicit cast means that if a function requires a certain type of value as a
 
 For example, `concat()` only accepts string parameters and since all types can be implicitly cast to strings, any type is accepted as an argument.
 
-This also goes for dynamically typed functions. `fill_missing()` requires two arguments of the same type. However, two types that implicitly cast to the same type can also be used.
+This also goes for dynamically typed functions.
+`fill_missing()` requires two arguments of the same type.
+However, two types that implicitly cast to the same type can also be used.
 
 For example, `fill_missing(<long>,<float>)` will be cast to `fill_missing(<double>,<double>)` since long cannot be cast to float and float cannot be cast to long implicitly.
 
@@ -285,7 +298,8 @@ However `round()`, `floor()` and `cell()` can return either int or long, dependi
 
 == Variable Functions
 
-Variable functions are a way to shorten your expressions and make writing analytics queries easier. They are essentially lambda functions defined in a request.
+Variable functions are a way to shorten your expressions and make writing analytics queries easier.
+They are essentially lambda functions defined in a request.
 
 [source,json]
 .Example Basic Function
@@ -301,7 +315,8 @@ Variable functions are a way to shorten your expressions and make writing analyt
 }
 ----
 
-In the above request, instead of writing `mult(price,quantity)` twice, a function `sale()` was defined to abstract this idea. Then that function was used in the multiple expressions.
+In the above request, instead of writing `mult(price,quantity)` twice, a function `sale()` was defined to abstract this idea.
+Then that function was used in the multiple expressions.
 
 Suppose that we want to look at the sales of specific categories:
 
@@ -486,7 +501,8 @@ The list of criteria to sort the facet by.
 +
 It takes the following parameters:
 
-`type`::: The type of sort. There are two possible values:
+`type`::: The type of sort.
+There are two possible values:
 * `expression`: Sort by the value of an expression defined in the same grouping.
 * `facetvalue`: Sort by the string-representation of the facet value.
 
@@ -594,11 +610,14 @@ The second pivot given will be treated like one value facet for each value of th
 Each of these second-level value facets will be limited to the documents in their first-level facet bucket.
 This continues for however many pivots are provided.
 
-Sorting is enabled on a per-pivot basis. This means that if your top pivot has a sort with `limit:1`, then only that first value of the facet will be drilled down into. Sorting in each pivot is independent of the other pivots.
+Sorting is enabled on a per-pivot basis.
+This means that if your top pivot has a sort with `limit:1`, then only that first value of the facet will be drilled down into.
+Sorting in each pivot is independent of the other pivots.
 
 ==== Parameters
 
-`pivots`:: The list of pivots to calculate a drill-down facet for. The list is ordered by top-most to bottom-most level.
+`pivots`:: The list of pivots to calculate a drill-down facet for.
+The list is ordered by top-most to bottom-most level.
 `name`::: The name of the pivot.
 `expression`::: The expression to choose a facet bucket for each document.
 `sort`::: A <<Facet Sorting,sort>> for the results of the pivot.
@@ -684,16 +703,20 @@ Refer to the <<faceting.adoc#range-faceting,Range Facet documentation>> for addi
 `field`:: Field to be faceted over
 `start`:: The bottom end of the range
 `end`:: The top end of the range
-`gap`:: A list of range gaps to generate facet buckets. If the buckets do not add up to fit the `start` to `end` range,
+`gap`:: A list of range gaps to generate facet buckets.
+If the buckets do not add up to fit the `start` to `end` range,
 then the last `gap` value will be repeated as many times as needed to fill any unused range.
-`hardend`:: Whether to cutoff the last facet bucket range at the `end` value if it spills over. Defaults to `false`.
-`include`:: The boundaries to include in the facet buckets. Defaults to `lower`.
+`hardend`:: Whether to cut off the last facet bucket range at the `end` value if it spills over.
+Defaults to `false`.
+`include`:: The boundaries to include in the facet buckets.
+Defaults to `lower`.
 * `lower` - All gap-based ranges include their lower bound.
 * `upper` - All gap-based ranges include their upper bound.
 * `edge` - The first and last gap ranges include their edge bounds (lower for the first one, upper for the last one) even if the corresponding upper/lower option is not specified.
 * `outer` - The `before` and `after` ranges will be inclusive of their bounds, even if the first or last ranges already include those boundaries.
 * `all` - Includes all options: `lower`, `upper`, `edge`, and `outer`
-`others`:: Additional ranges to include in the facet. Defaults to `none`.
+`others`:: Additional ranges to include in the facet.
+Defaults to `none`.
 * `before` - All records with field values lower than the lower bound of the first range.
 * `after` - All records with field values greater than the upper bound of the last range.
 * `between` - All records with field values between the lower bound of the first range and the upper bound of the last range.
diff --git a/solr/solr-ref-guide/src/analyzers.adoc b/solr/solr-ref-guide/src/analyzers.adoc
index ac5b26e..535926a 100644
--- a/solr/solr-ref-guide/src/analyzers.adoc
+++ b/solr/solr-ref-guide/src/analyzers.adoc
@@ -58,7 +58,8 @@ For example:
   </analyzer>
 </fieldType>
 ----
-Tokenizer and filter factory classes are referred by their symbolic names (SPI names). Here, name="standard" refers `org.apache.lucene.analysis.standard.StandardTokenizerFactory`.
+Tokenizer and filter factory classes are referred to by their symbolic names (SPI names).
+Here, name="standard" refers to `org.apache.lucene.analysis.standard.StandardTokenizerFactory`.
 ====
 [example.tab-pane#byclass]
 ====
diff --git a/solr/solr-ref-guide/src/audit-logging.adoc b/solr/solr-ref-guide/src/audit-logging.adoc
index 86b3baf..44efcd7 100644
--- a/solr/solr-ref-guide/src/audit-logging.adoc
+++ b/solr/solr-ref-guide/src/audit-logging.adoc
@@ -19,8 +19,8 @@
 Solr has the ability to log an audit trail of all HTTP requests entering the system.
 Audit loggers are pluggable to suit any possible format or log destination.
 
-[quote]
-An audit trail (also called audit log) is a security-relevant chronological record, set of records, and/or destination and source of records that provide documentary evidence of the sequence of activities that have affected at any time a specific operation, procedure, event, or device. (https://en.wikipedia.org/wiki/Audit_trail[Wikipedia])
+[quote, Wikipedia, https://en.wikipedia.org/wiki/Audit_trail]
+An audit trail (also called audit log) is a security-relevant chronological record, set of records, and/or destination and source of records that provide documentary evidence of the sequence of activities that have affected at any time a specific operation, procedure, event, or device.
 
 == Configuring Audit Logging
 Audit logging is configured in `security.json` under the `auditlogging` key.
@@ -239,7 +239,7 @@ The individual metrics are:
 * `lost`: (_meter_) Records number and rate of events lost if the queue is full and `blockAsync=false`.
 * `requestTimes`: (_timer_) Records latency and percentiles for audit logging performance.
 * `totalTime`: (_counter_) Records total time spent logging.
-* `queueCapacity`: (_gauge_). Records the maximum size of the async logging queue.
+* `queueCapacity`: (_gauge_) Records the maximum size of the async logging queue.
 * `queueSize`: (_gauge_) Records the number of events currently waiting in the queue.
 * `queuedTime`: (_timer_) Records the amount of time events waited in queue.
 Adding this with the `requestTimes` metric will show the total time from event to logging complete.
diff --git a/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc b/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc
index 62a38ac..8cd2002 100644
--- a/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc
+++ b/solr/solr-ref-guide/src/authentication-and-authorization-plugins.adoc
@@ -188,7 +188,8 @@ include::securing-solr.adoc[tag=list-of-authorization-plugins]
 Whenever an authentication plugin is enabled, authentication is also required for all or some operations in the Admin UI.
 The Admin UI is an AngularJS application running inside your browser, and is treated as any other external client by Solr.
 
-When authentication is required the Admin UI will presented you with a login dialogue. The authentication plugins currently supported by the Admin UI are:
+When authentication is required, the Admin UI will present you with a login dialogue.
+The authentication plugins currently supported by the Admin UI are:
 
 * <<basic-authentication-plugin.adoc#,Basic Authentication Plugin>>
 * <<jwt-authentication-plugin.adoc#,JWT Authentication Plugin>>
diff --git a/solr/solr-ref-guide/src/backup-restore.adoc b/solr/solr-ref-guide/src/backup-restore.adoc
index 5c05cfd..3a25bf6 100644
--- a/solr/solr-ref-guide/src/backup-restore.adoc
+++ b/solr/solr-ref-guide/src/backup-restore.adoc
@@ -201,7 +201,8 @@ If no repository is specified then the local filesystem repository will be used
 The `restore` command is an asynchronous call.
 Once the restore is complete the data reflected will be of the backed up index which was restored.
 
-Only one `restore` call can can be made against a core at one point in time. While an ongoing restore operation is happening subsequent calls for restoring will throw an exception.
+Only one `restore` call can be made against a core at one point in time.
+While an ongoing restore operation is happening, subsequent calls for restoring will throw an exception.
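
As a rough sketch, a core-level restore is triggered through the replication handler; the core name and backup name below are placeholders:

[source,bash]
----
# Restore the named backup into the "techproducts" core (the call is asynchronous)
curl "http://localhost:8983/solr/techproducts/replication?command=restore&name=my_backup"
----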
 
 === Restore Status API
 
@@ -376,7 +377,8 @@ A LocalFileSystemRepository instance is used as a default by any backup and rest
 LocalFileSystemRepository accepts the following configuration options:
 
 `location`::
-A valid file path (accessible to Solr locally) to use for backup storage and retrieval.  Used as a fallback when user's don't provide a `location` parameter in their Backup or Restore API commands
+A valid file path (accessible to Solr locally) to use for backup storage and retrieval.
+Used as a fallback when users don't provide a `location` parameter in their Backup or Restore API commands.
 
 An example configuration using this property can be found below.
 
@@ -411,7 +413,8 @@ A HDFS URI in the format `hdfs://<host>:<port>/<hdfsBaseFilePath>` that points S
 A permission umask used when creating files in HDFS.
 
 `location`::
-A valid directory path on the HDFS cluster to use for backup storage and retrieval.  Used as a fallback when users don't provide a `location` parameter in their Backup or Restore API commands
+A valid directory path on the HDFS cluster to use for backup storage and retrieval.
+Used as a fallback when users don't provide a `location` parameter in their Backup or Restore API commands.
 
 An example configuration using these properties can be found below:
 
diff --git a/solr/solr-ref-guide/src/block-join-query-parser.adoc b/solr/solr-ref-guide/src/block-join-query-parser.adoc
index 47ab4bf..5f3055d 100644
--- a/solr/solr-ref-guide/src/block-join-query-parser.adoc
+++ b/solr/solr-ref-guide/src/block-join-query-parser.adoc
@@ -166,7 +166,8 @@ Overall the idea is similar to <<faceting.adoc#tagging-and-excluding-filters, ex
 === Scoring with the Block Join Parent Query Parser
 
 You can optionally use the `score` local parameter to return scores of the subordinate query.
-The values to use for this parameter define the type of aggregation, which are `avg` (average), `max` (maximum), `min` (minimum), `total (sum)`. Implicit default is `none` which returns `0.0`.
+The values to use for this parameter define the type of aggregation: `avg` (average), `max` (maximum), `min` (minimum), or `total` (sum).
+Implicit default is `none`, which returns `0.0`.
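
A hedged sketch of what this looks like in practice; the field names (`content_type`, `comments`) are illustrative and not taken from this page:

[source,bash]
----
# Return parent documents, scoring each by the maximum score of its matching children
curl http://localhost:8983/solr/gettingstarted/select \
  --data-urlencode 'q={!parent which="content_type:parent" score=max}comments:solr'
----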
 
 === All Parents Syntax
 
diff --git a/solr/solr-ref-guide/src/circuit-breakers.adoc b/solr/solr-ref-guide/src/circuit-breakers.adoc
index b28bda6..83fa418 100644
--- a/solr/solr-ref-guide/src/circuit-breakers.adoc
+++ b/solr/solr-ref-guide/src/circuit-breakers.adoc
@@ -16,18 +16,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr's circuit breaker infrastructure allows prevention of actions that can cause a node to go beyond its capacity or to go down. The
-premise of circuit breakers is to ensure a higher quality of service and only accept request loads that are serviceable in the current
+Solr's circuit breaker infrastructure allows prevention of actions that can cause a node to go beyond its capacity or to go down.
+The premise of circuit breakers is to ensure a higher quality of service and only accept request loads that are serviceable in the current
 resource configuration.
 
 == When To Use Circuit Breakers
-Circuit breakers should be used when the user wishes to trade request throughput for a higher Solr stability. If circuit breakers
-are enabled, requests may be rejected under the condition of high node duress with an appropriate HTTP error code (typically 503).
+Circuit breakers should be used when the user wishes to trade request throughput for higher Solr stability.
+If circuit breakers are enabled, requests may be rejected under the condition of high node duress with an appropriate HTTP error code (typically 503).
 
 It is up to the client to handle this error and potentially build retry logic, as this should ideally be a transient situation.
 
 == Circuit Breaker Configurations
-All circuit breaker configurations are listed in the circuitBreaker tags in solrconfig.xml as shown below:
+All circuit breaker configurations are listed in the `<circuitBreaker>` tags in `solrconfig.xml` as shown below:
 
 [source,xml]
 ----
@@ -36,21 +36,21 @@ All circuit breaker configurations are listed in the circuitBreaker tags in solr
 </circuitBreaker>
 ----
 
-The "enabled" attribute controls the global activation/deactivation of circuit breakers. If this flag is disabled, all circuit breakers
-will be disabled globally. Per circuit breaker configurations are specified in their respective sections later.
+The `enabled` attribute controls the global activation/deactivation of circuit breakers.
+If this flag is disabled, all circuit breakers will be disabled globally.
+Per circuit breaker configurations are specified in their respective sections later.
 
-This attribute acts as the highest authority and global controller of circuit breakers. For using specific circuit breakers, each one
-needs to be individually enabled in addition to this flag being enabled.
+This attribute acts as the highest authority and global controller of circuit breakers.
+For using specific circuit breakers, each one needs to be individually enabled in addition to this flag being enabled.
 
-CircuitBreakerManager is the default manager for all circuit breakers that should be defined in the tag unless the user wishes to use
-a custom implementation.
+`CircuitBreakerManager` is the default manager for all circuit breakers that should be defined in the tag unless the user wishes to use a custom implementation.
 
 == Currently Supported Circuit Breakers
 
-=== JVM Heap Usage Based Circuit Breaker
-This circuit breaker tracks JVM heap memory usage and rejects incoming search requests with a 503 error code if the heap usage
-exceeds a configured percentage of maximum heap allocated to the JVM (-Xmx). The main configuration for this circuit breaker is
-controlling the threshold percentage at which the breaker will trip.
+=== JVM Heap Usage
+
+This circuit breaker tracks JVM heap memory usage and rejects incoming search requests with a 503 error code if the heap usage exceeds a configured percentage of maximum heap allocated to the JVM (-Xmx).
+The main configuration for this circuit breaker is controlling the threshold percentage at which the breaker will trip.
 
 Configuration for JVM heap usage based circuit breaker:
 
@@ -59,30 +59,30 @@ Configuration for JVM heap usage based circuit breaker:
 <str name="memEnabled">true</str>
 ----
 
-Note that this configuration will be overridden by the global circuit breaker flag -- if circuit breakers are disabled, this flag
-will not help you.
+Note that this configuration will be overridden by the global circuit breaker flag -- if circuit breakers are disabled, this flag will not help you.
 
-The triggering threshold is defined as a percentage of the max heap allocated to the JVM. It is controlled by the below configuration:
+The triggering threshold is defined as a percentage of the max heap allocated to the JVM.
+It is controlled by the below configuration:
 
 [source,xml]
 ----
 <str name="memThreshold">75</str>
 ----
 
-It does not logically make sense to have a threshold below 50% and above 95% of the max heap allocated to the JVM. Hence, the range
-of valid values for this parameter is [50, 95], both inclusive.
+It does not logically make sense to have a threshold below 50% and above 95% of the max heap allocated to the JVM.
+Hence, the range of valid values for this parameter is [50, 95], both inclusive.
 
 Consider the following example:
 
-JVM has been allocated a maximum heap of 5GB (-Xmx) and memoryCircuitBreakerThresholdPct is set to 75. In this scenario, the heap usage
-at which the circuit breaker will trip is 3.75GB.
+The JVM has been allocated a maximum heap of 5GB (-Xmx) and `memThreshold` is set to `75`.
+In this scenario, the heap usage at which the circuit breaker will trip is 3.75GB.
+
 
+=== CPU Utilization
 
-=== CPU Utilization Based Circuit Breaker
-This circuit breaker tracks CPU utilization and triggers if the average CPU utilization over the last one minute
-exceeds a configurable threshold. Note that the value used in computation is over the last one minute -- so a sudden
-spike in traffic that goes down might still cause the circuit breaker to trigger for a short while before it resolves
-and updates the value. For more details of the calculation, please see https://en.wikipedia.org/wiki/Load_(computing)
+This circuit breaker tracks CPU utilization and triggers if the average CPU utilization over the last one minute exceeds a configurable threshold.
+Note that the value used in computation is over the last one minute -- so a sudden spike in traffic that goes down might still cause the circuit breaker to trigger for a short while before it resolves and updates the value.
+For more details of the calculation, please see https://en.wikipedia.org/wiki/Load_(computing)
 
 Configuration for CPU utilization based circuit breaker:
 
@@ -91,10 +91,10 @@ Configuration for CPU utilization based circuit breaker:
 <str name="cpuEnabled">true</str>
 ----
 
-Note that this configuration will be overridden by the global circuit breaker flag -- if circuit breakers are disabled, this flag
-will not help you.
+Note that this configuration will be overridden by the global circuit breaker flag -- if circuit breakers are disabled, this flag will not help you.
 
-The triggering threshold is defined in units of CPU utilization. The configuration to control this is as below:
+The triggering threshold is defined in units of CPU utilization.
+The configuration to control this is as below:
 
 [source,xml]
 ----
@@ -102,8 +102,7 @@ The triggering threshold is defined in units of CPU utilization. The configurati
 ----
 
 == Performance Considerations
-It is worth noting that while JVM or CPU circuit breakers do not add any noticeable overhead per query, having too many
-circuit breakers checked for a single request can cause a performance overhead.
 
-In addition, it is a good practice to exponentially back off while retrying requests on a busy node.
+It is worth noting that while JVM or CPU circuit breakers do not add any noticeable overhead per query, having too many circuit breakers checked for a single request can cause a performance overhead.
 
+In addition, it is a good practice to exponentially back off while retrying requests on a busy node.
diff --git a/solr/solr-ref-guide/src/cluster-node-management.adoc b/solr/solr-ref-guide/src/cluster-node-management.adoc
index 2f37b5a..3e69179 100644
--- a/solr/solr-ref-guide/src/cluster-node-management.adoc
+++ b/solr/solr-ref-guide/src/cluster-node-management.adoc
@@ -314,7 +314,8 @@ Support for the "collectionDefaults" key will be removed in Solr 9.
 === Default Shard Preferences
 
 Using the `defaultShardPreferences` parameter, you can implement rack or availability zone awareness.
-First, make sure to "label" your nodes using a <<property-substitution.adoc#jvm-system-properties,system property>> (e.g., `-Drack=rack1`). Then, set the value of `defaultShardPreferences` to `node.sysprop:sysprop.YOUR_PROPERTY_NAME` like this:
+First, make sure to "label" your nodes using a <<property-substitution.adoc#jvm-system-properties,system property>> (e.g., `-Drack=rack1`).
+Then, set the value of `defaultShardPreferences` to `node.sysprop:sysprop.YOUR_PROPERTY_NAME` like this:
 
 [source,bash]
 ----
@@ -422,7 +423,8 @@ Examining the clusterstate after issuing this call should show exactly one repli
 [[replacenode]]
 == REPLACENODE: Move All Replicas in a Node to Another
 
-This command recreates replicas in one node (the source) on another node(s) (the target). After each replica is copied, the replicas in the source node are deleted.
+This command recreates replicas in one node (the source) on another node(s) (the target).
+After each replica is copied, the replicas in the source node are deleted.
 
 For source replicas that are also shard leaders, the operation will wait for the number of seconds set with the `timeout` parameter to make sure there's an active replica that can become a leader (either an existing replica becoming a leader or the new replica completing recovery and becoming a leader).
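
A minimal sketch of the call; the node names are placeholders and the `sourceNode`/`targetNode` parameter names are assumed here:

[source,bash]
----
# Move all replicas from source-node to target-node
curl "http://localhost:8983/solr/admin/collections?action=REPLACENODE&sourceNode=source-node:8983_solr&targetNode=target-node:8983_solr"
----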
 
diff --git a/solr/solr-ref-guide/src/cluster-plugins.adoc b/solr/solr-ref-guide/src/cluster-plugins.adoc
index 8649ad6..ad43eba 100644
--- a/solr/solr-ref-guide/src/cluster-plugins.adoc
+++ b/solr/solr-ref-guide/src/cluster-plugins.adoc
@@ -19,46 +19,43 @@
 // under the License.
 
 == Cluster (CoreContainer-level) Plugins Subsystem
-Cluster plugins are pluggable components that are defined and instantiated at the
-`CoreContainer` (node) level. These components usually provide admin-level functionality
-and APIs for additional functionality at the Solr node level.
+Cluster plugins are pluggable components that are defined and instantiated at the `CoreContainer` (node) level.
+These components usually provide admin-level functionality and APIs for additional functionality at the Solr node level.
 
 === Plugin Configurations
 Plugin configurations are maintained using `/cluster/plugin` API.
 
 This API endpoint allows adding, removing and updating plugin configurations.
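
For example, removing a previously registered plugin by name might look like the following sketch; the plugin name is a placeholder and the V2 `/api/cluster/plugin` path is assumed:

[source,bash]
----
# Remove the plugin registered under the name "my-plugin"
curl -X POST -H 'Content-type: application/json' \
  -d '{"remove": "my-plugin"}' \
  http://localhost:8983/api/cluster/plugin
----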
 
-Each plugin MUST have a unique name under which it's registered. Attempting to
-add a plugin with a duplicate name is an error. Some types of plugins use
-pre-defined names, and they MUST be registered under these names in order to
-properly function.
+Each plugin MUST have a unique name under which it's registered.
+Attempting to add a plugin with a duplicate name is an error.
+Some types of plugins use pre-defined names, and they MUST be registered under these names in order to properly function.
 
-Internally, as of Solr 9.0 plugin configurations are maintained in ZooKeeper in the
-`/clusterprops.json` file, under the `plugin` entry. The configuration is a JSON map
-where keys are the unique plugin names, and values are serialized
-`org.apache.solr.client.solrj.request.beans.PluginMeta` beans.
+Internally, as of Solr 9.0 plugin configurations are maintained in ZooKeeper in the `/clusterprops.json` file, under the `plugin` entry.
+The configuration is a JSON map where keys are the unique plugin names, and values are serialized `org.apache.solr.client.solrj.request.beans.PluginMeta` beans.
 
 The following common plugin properties are supported:
 
 `name`::
-(required) unique plugin name. Some plugin types require using one of the
-pre-defined names to properly function. By convention such predefined names use
-a leading-dot prefix (e.g., `.placement-plugin`)
+(required) unique plugin name.
+Some plugin types require using one of the pre-defined names to properly function.
+By convention, such predefined names use a leading-dot prefix (e.g., `.placement-plugin`).
 
 `class`::
-(required) implementation class. This can be specified as a fully-qualified
-class name if the class is available as a part of Solr, or it can be also
-specified using the `<package>:<className>` syntax to refer to a class inside
-one of the Solr packages.
+(required) implementation class.
+This can be specified as a fully-qualified class name if the class is available as a part of Solr, or it can also be specified using the `<package>:<className>` syntax to refer to a class inside one of the Solr packages.
 
 `version`::
-(required when class is loaded from a package). Solr package version.
+(required when class is loaded from a package).
+Solr package version.
 
 `path-prefix`::
-(optional, default is none). Path prefix to be added to the REST API endpoints defined in the plugin.
+(optional, default is `none`).
+Path prefix to be added to the REST API endpoints defined in the plugin.
 
 `config`::
-(optional, default is none). A JSON map of additional plugin configuration parameters.
+(optional, default is `none`).
+A JSON map of additional plugin configuration parameters.
 Plugins that implement `ConfigurablePlugin` interface will be initialized with a
 plugin-specific configuration object deserialized from this map.
 
@@ -85,42 +82,35 @@ curl -X POST -H 'Content-type: application/json' -d '{
 === Types of Cluster Plugins
 Classes loaded from plugins in general support two types of functionality (not mutually exclusive):
 
-* request handler plugins that expose REST API endpoints (the implementing class is annotated with
-`@EndPoint` and optionally `@Command` annotations). The APIs of these plugins are automatically
-registered as REST endpoints under the paths defined in the `@EndPoint` annotations.
+* request handler plugins that expose REST API endpoints (the implementing class is annotated with `@EndPoint` and optionally `@Command` annotations).
+The APIs of these plugins are automatically registered as REST endpoints under the paths defined in the `@EndPoint` annotations.
 
-* plugins that implement a specific interface, for use as an internal component. Upon loading they are
-automatically discovered and registered with sub-systems that use this type of plugin. Examples here
-include the `ClusterSingleton`, ClusterEventProducer`, `ClusterEventListener`
-and `PlacementPluginFactory`.
+* plugins that implement a specific interface, for use as an internal component.
+Upon loading they are automatically discovered and registered with sub-systems that use this type of plugin.
+Examples here include the `ClusterSingleton`, `ClusterEventProducer`, `ClusterEventListener`, and `PlacementPluginFactory` interfaces.
 
 === Plugin Lifecycle
-Plugin instances are loaded and initialized when Solr's `CoreContainer` is first created during
-Solr node start-up.
+Plugin instances are loaded and initialized when Solr's `CoreContainer` is first created during Solr node start-up.
 
-Then on each update of the configurations each node is notified about the change,
-and then the existing plugins are compared with the new configs, and plugin instances
-present on the node are respectively created, removed, or
-replaced (i.e., removed and added using the new configuration).
+On each configuration update, every node is notified about the change; the existing plugins are then compared with the new configs, and plugin instances present on the node are created, removed, or replaced (i.e., removed and added using the new configuration) accordingly.
 
 In practice this means that cluster-level plugins managed by this API can be
-dynamically changed and reconfigured without restarting the Solr nodes, and the changes
-apply to all nodes nearly simultaneously.
+dynamically changed and reconfigured without restarting the Solr nodes, and the changes apply to all nodes nearly simultaneously.
 
 == Plugin Types
 
 === Predefined Plugin Names
 
-Plugins with these names are used in specific parts of Solr. Their names are reserved
-and cannot be used for other plugin types:
+Plugins with these names are used in specific parts of Solr.
+Their names are reserved and cannot be used for other plugin types:
 
 `.placement-plugin`::
-plugin that implements `PlacementPluginFactory` interface. This type of plugin
-determines the replica placement strategy in the cluster.
+A plugin that implements the `PlacementPluginFactory` interface.
+This type of plugin determines the replica placement strategy in the cluster; a registration sketch is shown below.
 
 `.cluster-event-producer`::
-plugin that implements `ClusterEventProducer` interface. This type of plugin
-is used for generating cluster-level events.
+A plugin that implements the `ClusterEventProducer` interface.
+This type of plugin is used for generating cluster-level events.
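+
+As an illustration, a minimal sketch of registering a placement plugin under its reserved `.placement-plugin` name might look like the following (the class shown is illustrative; any `PlacementPluginFactory` implementation available to the node could be used):
+
+[source,bash]
+----
+curl -X POST -H 'Content-type: application/json' -d '{
+    "add": {
+        "name": ".placement-plugin",
+        "class": "org.apache.solr.cluster.placement.plugins.AffinityPlacementFactory"
+    }}'
+  http://localhost:8983/api/cluster/plugin
+----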
 
 === PlacementPluginFactory Plugins
 This type of plugin supports configurable placement strategies for collection
@@ -128,9 +118,8 @@ replicas.
 
 === ClusterSingleton Plugins
 Plugins that implement the `ClusterSingleton` interface are instantiated on each
-Solr node. However, their start/stop life-cycle, as defined in the interface,
-is controlled in such a way that only a single running instance of the plugin
-is present in the cluster at any time.
+Solr node.
+However, their start/stop life-cycle, as defined in the interface, is controlled in such a way that only a single running instance of the plugin is present in the cluster at any time.
 
 (Currently this is implemented by re-using the Overseer leader election, so all
 `ClusterSingleton` plugins that are in the RUNNING state execute on the Overseer leader node).
@@ -140,18 +129,15 @@ it requires this cluster singleton behavior.
 
 === ClusterEventProducer Plugins
 In order to support the generation of cluster-level events an implementation of
-`ClusterEventProducer` is created on each Solr node. This component is also a
-`ClusterSingleton`, which means that only one active instance is present in the
+`ClusterEventProducer` is created on each Solr node.
+This component is also a `ClusterSingleton`, which means that only one active instance is present in the
 cluster at any time.
 
 If no plugin configuration is specified then the default implementation
-`org.apache.solr.cluster.events.impl.NoOpProducer` is used, which doesn't generate
-any events - this means that by default event generation is turned off. An implementation
-that supports node and collection event generation is also available in
-`org.apache.solr.cluster.events.impl.DefaultClusterEventProducer`.
+`org.apache.solr.cluster.events.impl.NoOpProducer` is used, which doesn't generate any events, meaning that by default event generation is turned off.
+An implementation that supports node and collection event generation is also available in `org.apache.solr.cluster.events.impl.DefaultClusterEventProducer`.
 
-Event producer configuration can be changed dynamically by changing the predefined
-plugin configuration, for example:
+Event producer configuration can be changed dynamically by changing the predefined plugin configuration, for example:
 
 [source,bash]
 ----
@@ -175,21 +161,17 @@ curl -X POST -H 'Content-type: application/json' -d '{
 
 
 === ClusterEventListener Plugins
-Plugins that implement the `ClusterEventListener` interface will be automatically
-registered with the instance of `ClusterEventProducer`.
+Plugins that implement the `ClusterEventListener` interface will be automatically registered with the instance of `ClusterEventProducer`.
 
 // XXX edit this once SOLR-14977 is done
 Implementations will be notified of all events that are generated by the
 `ClusterEventProducer` and need to select only events that they are interested in.
 
 ==== org.apache.solr.cluster.events.impl.CollectionsRepairEventListener
-An implementation of listener that reacts to NODE_LOST events and checks what replicas
-need to be re-added to other nodes to keep the replication counts the same as before.
+A listener implementation that reacts to NODE_LOST events and checks which replicas need to be re-added to other nodes to keep the replication counts the same as before.
 
-This implementation waits for a certain period (default is 30s) to make sure the node
-is really down, and for the replicas located on nodes that were down sufficiently long
-it generates appropriate ADDREPLICA commands to counter-balance the lost replicas on
-these nodes.
+This implementation waits for a certain period (default is 30s) to make sure the node is really down.
+For the replicas located on nodes that were down sufficiently long it generates appropriate ADDREPLICA commands to counter-balance the lost replicas on these nodes.
 
 Example plugin configuration:
 
@@ -214,8 +196,8 @@ curl http://localhost:8983/api/cluster/plugin
 ----
 
 === Add Plugin
-This command uses HTTP POST to add a new plugin configuration. If a plugin with the
-same name already exists this results in an error.
+This command uses HTTP POST to add a new plugin configuration.
+If a plugin with the same name already exists, this results in an error.
 
 Example command, which adds a plugin contained in a Solr package:
 [source,bash]
@@ -230,12 +212,12 @@ curl -X POST -H 'Content-type: application/json' -d '{
 ----
 
 === Update Plugin
-This command uses HTTP POST to update an existing plugin configuration. If a plugin
-with this name doesn't exist this results in an error.
+This command uses HTTP POST to update an existing plugin configuration.
+If a plugin with this name doesn't exist, this results in an error.
+
+This example updates an existing plugin, possibly changing its configuration parameters.
+The old instance of the plugin is removed and a new instance is created using the supplied configuration.
 
-This example updates an existing plugin, possibly changing its configuration paramers.
-The old instance of the plugin is removed and a new instance is created using the supplied
-configuration.
 [source,bash]
 ----
 curl -X POST -H 'Content-type: application/json' -d '{
@@ -249,11 +231,10 @@ curl -X POST -H 'Content-type: application/json' -d '{
 ----
 
 === Remove Plugin
-This command uses HTTP POST to delete an existing plugin configuration. If a plugin
-with this name doesn't exist this results in an error.
+This command uses HTTP POST to delete an existing plugin configuration.
+If a plugin with this name doesn't exist, this results in an error.
 
-Unlike other commands the command payload here consists just of
-the name of the plugin to remove, as a string.
+Unlike other commands, the command payload here consists of just the name of the plugin to remove, as a string.
 
 [source,bash]
 ----
diff --git a/solr/solr-ref-guide/src/codec-factory.adoc b/solr/solr-ref-guide/src/codec-factory.adoc
index 7f77028..590f73e 100644
--- a/solr/solr-ref-guide/src/codec-factory.adoc
+++ b/solr/solr-ref-guide/src/codec-factory.adoc
@@ -45,7 +45,10 @@ Example:
 
 This factory, {solr-javadocs}/core/org/apache/solr/core/SimpleTextCodecFactory.html[`SimpleTextCodecFactory`], produces a plain text human-readable index format using Lucene's `SimpleTextCodec`.
 
-CAUTION: *FOR RECREATIONAL USE ONLY*. This codec should never be used in production. `SimpleTextCodec` is relatively slow and takes up a large amount of disk space. Its use should be limited to educational and debugging purposes.
+CAUTION: *FOR RECREATIONAL USE ONLY*.
+This codec should never be used in production.
+`SimpleTextCodec` is relatively slow and takes up a large amount of disk space.
+Its use should be limited to educational and debugging purposes.
 
 Example:
 
diff --git a/solr/solr-ref-guide/src/collapse-and-expand-results.adoc b/solr/solr-ref-guide/src/collapse-and-expand-results.adoc
index ce39c1c..4f7d87a 100644
--- a/solr/solr-ref-guide/src/collapse-and-expand-results.adoc
+++ b/solr/solr-ref-guide/src/collapse-and-expand-results.adoc
@@ -18,16 +18,23 @@
 
 The Collapsing query parser and the Expand component combine to form an approach to grouping documents for field collapsing in search results.
 
-The Collapsing query parser groups documents (collapsing the result set) according to your parameters, while the Expand component provides access to documents in the collapsed group for use in results display or other processing by a client application. Collapse & Expand can together do what the older <<result-grouping.adoc#,Result Grouping>> (`group=true`) does for _most_ use-cases but not all. Collapse and Expand are not supported when Result Grouping is enabled. Generally, you should  [...]
+The Collapsing query parser groups documents (collapsing the result set) according to your parameters, while the Expand component provides access to documents in the collapsed group for use in results display or other processing by a client application.
+Collapse & Expand can together do what the older <<result-grouping.adoc#,Result Grouping>> (`group=true`) does for _most_ use-cases but not all.
+Collapse and Expand are not supported when Result Grouping is enabled.
+Generally, you should prefer Collapse & Expand.
 
 [IMPORTANT]
 ====
-In order to use these features with SolrCloud, the documents must be located on the same shard. To ensure document co-location, you can define the `router.name` parameter as `compositeId` when creating the collection. For more information on this option, see the section <<solrcloud-shards-indexing.adoc#document-routing,Document Routing>>.
+In order to use these features with SolrCloud, the documents must be located on the same shard.
+To ensure document co-location, you can define the `router.name` parameter as `compositeId` when creating the collection.
+For more information on this option, see the section <<solrcloud-shards-indexing.adoc#document-routing,Document Routing>>.
 ====
 
 == Collapsing Query Parser
 
-The `CollapsingQParser` is really a _post filter_ that provides more performant field collapsing than Solr's standard approach when the number of distinct groups in the result set is high. This parser collapses the result set to a single document per group before it forwards the result set to the rest of the search components. So all downstream components (faceting, highlighting, etc.) will work with the collapsed result set.
+The `CollapsingQParser` is really a _post filter_ that provides more performant field collapsing than Solr's standard approach when the number of distinct groups in the result set is high.
+This parser collapses the result set to a single document per group before it forwards the result set to the rest of the search components.
+So all downstream components (faceting, highlighting, etc.) will work with the collapsed result set.
 
 The CollapsingQParserPlugin fully supports the QueryElevationComponent.
 
@@ -36,26 +43,30 @@ The CollapsingQParserPlugin fully supports the QueryElevationComponent.
 The CollapsingQParser accepts the following local params:
 
 `field`::
-The field that is being collapsed on. The field must be a single valued String, Int or Float-type of field.
+The field that is being collapsed on.
+The field must be a single-valued String, Int, or Float field.
 
 `min` or `max`::
 Selects the group head document for each group based on which document has the min or max value of the specified numeric field or <<function-queries.adoc#,function query>>.
 +
 At most only one of the `min`, `max`, or `sort` (see below) parameters may be specified.
 +
-If none are specified, the group head document of each group will be selected based on the highest scoring document in that group. The default is none.
+If none are specified, the group head document of each group will be selected based on the highest scoring document in that group.
+The default is none.
 
 `sort`::
 Selects the group head document for each group based on which document comes first according to the specified <<common-query-parameters.adoc#sort-parameter,sort string>>.
 +
 At most only one of the `min`, `max`, (see above) or `sort` parameters may be specified.
 +
-If none are specified, the group head document of each group will be selected based on the highest scoring document in that group. The default is none.
+If none are specified, the group head document of each group will be selected based on the highest scoring document in that group.
+The default is none.
 
 `nullPolicy`::
 There are three available null policies:
 +
-* `ignore`: removes documents with a null value in the collapse field. This is the default.
+* `ignore`: removes documents with a null value in the collapse field.
+This is the default.
 * `expand`: treats each document with a null value in the collapse field as a separate group.
 * `collapse`: collapses all documents with a null value into a single group using either highest score, or minimum/maximum.
 +
@@ -67,7 +78,10 @@ There are two hint options available:
 +
 `top_fc`::: This stands for top level FieldCache.
 +
-The `hint=top_fc` hint is only available when collapsing on String fields. `top_fc` usually provides the best query time speed but takes the longest to warm on startup or following a commit. `top_fc` will also result in having the collapsed field cached in memory twice if it's used for faceting or sorting. For very high cardinality (high distinct count) fields, `top_fc` may not fare so well.
+The `hint=top_fc` hint is only available when collapsing on String fields.
+`top_fc` usually provides the best query time speed but takes the longest to warm on startup or following a commit.
+`top_fc` will also result in having the collapsed field cached in memory twice if it's used for faceting or sorting.
+For very high cardinality (high distinct count) fields, `top_fc` may not fare so well.
 +
 `hint=block`::: This indicates that the field being collapsed on is suitable for the optimized <<#block-collapsing,Block Collapse>> logic described below.
 +
@@ -76,7 +90,8 @@ The default is none.
 `size`::
 Sets the initial size of the collapse data structures when collapsing on a *numeric field only*.
 +
-The data structures used for collapsing grow dynamically when collapsing on numeric fields. Setting the size above the number of results expected in the result set will eliminate the resizing cost.
+The data structures used for collapsing grow dynamically when collapsing on numeric fields.
+Setting the size above the number of results expected in the result set will eliminate the resizing cost.
 +
 The default is 100,000.
 
@@ -108,14 +123,16 @@ Collapse on `group_field` selecting the document in each group with the maximum
 fq={!collapse field=group_field max=numeric_field}
 ----
 
-Collapse on `group_field` selecting the document in each group with the maximum value of a function. Note that the *cscore()* function can be used with the min/max options to use the score of the current document being collapsed.
+Collapse on `group_field` selecting the document in each group with the maximum value of a function.
+Note that the *cscore()* function can be used with the min/max options to use the score of the current document being collapsed.
 
 [source,text]
 ----
 fq={!collapse field=group_field max=sum(cscore(),numeric_field)}
 ----
 
-Collapse on `group_field` with a null policy so that all docs that do not have a value in the `group_field` will be treated as a single group. For each group, the selected document will be based first on a `numeric_field`, but ties will be broken by score:
+Collapse on `group_field` with a null policy so that all docs that do not have a value in the `group_field` will be treated as a single group.
+For each group, the selected document will be based first on a `numeric_field`, but ties will be broken by score:
 
 [source,text]
 ----
@@ -143,7 +160,9 @@ The default collapsing logic must keep track of all group head documents -- for
 
 When collapsing on the `\_root_` field, however, the logic knows that as it scans over the index, it will never encounter any new documents in a group that it has previously processed.
 
-This more efficient logic can also be used with other `collapseField` values, via the `hint=block` local param.  This can be useful when you have deeply nested documents and you'd like to collapse on a field that does not contain identical values for all documents with a common `\_root_` but is a unique and identical value for sets of contiguous documents under a common `\_root_`.  For example: searching for "grand child" documents and collapsing on a field that is unique per "child document"
+This more efficient logic can also be used with other `collapseField` values, via the `hint=block` local param.
+This can be useful when you have deeply nested documents and you'd like to collapse on a field that does not contain identical values for all documents with a common `\_root_` but is a unique and identical value for sets of contiguous documents under a common `\_root_`.
+For example: searching for "grand child" documents and collapsing on a field that is unique per "child document".
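+
+A hypothetical filter query of this kind, using an illustrative field name, might look like:
+
+[source,text]
+----
+fq={!collapse field=child_doc_id_s hint=block}
+----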
 
 [CAUTION]
 ====
@@ -163,9 +182,11 @@ Example usage with the CollapsingQParserPlugin:
 q=foo&fq={!collapse field=ISBN}
 ----
 
-In the query above, the CollapsingQParserPlugin will collapse the search results on the _ISBN_ field. The main search results will contain the highest ranking document from each book.
+In the query above, the CollapsingQParserPlugin will collapse the search results on the _ISBN_ field.
+The main search results will contain the highest ranking document from each book.
 
-The ExpandComponent can now be used to expand the results so you can see the documents grouped by ISBN. For example:
+The ExpandComponent can now be used to expand the results so you can see the documents grouped by ISBN.
+For example:
 
 [source,text]
 ----
@@ -180,7 +201,8 @@ If there are multiple collapse groups with same cost then the first specified on
 
 When enabled, the ExpandComponent adds a new section to the search output labeled `expanded`.
 
-Inside the `expanded` section there is a _map_ with each group head pointing to the expanded documents that are within the group. As applications iterate the main collapsed result set, they can access the _expanded_ map to retrieve the expanded groups.
+Inside the `expanded` section there is a _map_ with each group head pointing to the expanded documents that are within the group.
+As applications iterate the main collapsed result set, they can access the _expanded_ map to retrieve the expanded groups.
 
 The ExpandComponent has the following parameters:
 
@@ -188,25 +210,31 @@ The ExpandComponent has the following parameters:
 When `true`, the ExpandComponent is enabled.
 
 `expand.field`::
-Field on which expand documents need to be populated. When `expand=true`, either this parameter needs to be specified or should be used with CollapsingQParserPlugin.
+The field on which the returned documents are expanded (grouped).
+When `expand=true`, either this parameter must be specified or the request must also use the CollapsingQParserPlugin.
 When both are specified, this parameter is given higher priority.
 
 `expand.sort`::
-Orders the documents within the expanded groups. The default is `score desc`.
+Orders the documents within the expanded groups.
+The default is `score desc`.
 
 `expand.rows`::
-The number of rows to display in each group. The default is 5 rows.
+The number of rows to display in each group.
+The default is 5 rows.
 +
 [IMPORTANT]
 ====
-When `expand.rows=0`, only the number of documents found for each expanded value is returned. Hence, scores won't be computed even if requested and `maxScore` is set to 0.
+When `expand.rows=0`, only the number of documents found for each expanded value is returned.
+Hence, scores won't be computed even if requested and `maxScore` is set to 0.
 ====
 
 `expand.q`::
-Overrides the main query (`q`), determines which documents to include in the main group. The default is to use the main query.
+Overrides the main query (`q`) and determines which documents to include in the main group.
+The default is to use the main query.
 
 `expand.fq`::
-Overrides main filter queries (`fq`), determines which documents to include in the main group. The default is to use the main filter queries.
+Overrides the main filter queries (`fq`) and determines which documents to include in the main group.
+The default is to use the main filter queries.
 
 `expand.nullGroup`::
 Indicates if an expanded group can be returned containing documents with no value in the expanded field.
diff --git a/solr/solr-ref-guide/src/collection-management.adoc b/solr/solr-ref-guide/src/collection-management.adoc
index 788e0d5..fa658bf 100644
--- a/solr/solr-ref-guide/src/collection-management.adoc
+++ b/solr/solr-ref-guide/src/collection-management.adoc
@@ -149,7 +149,8 @@ If the field specified is null in the document, the document will be rejected.
 Please note that <<realtime-get.adoc#,RealTime Get>> or retrieval by document ID would also require the parameter `\_route_` (or `shard.keys`) to avoid a distributed search.
 
 `perReplicaState`::
-If `true` the states of individual replicas will be maintained as individual child of the `state.json`. The default is `false`.
+If `true`, the state of each replica will be maintained as an individual child of `state.json`.
+The default is `false`.
 
 `property._name_=_value_`::
 Set core property _name_ to _value_. See the section <<core-discovery.adoc#,Core Discovery>> for details on supported properties and values.
@@ -663,7 +664,8 @@ This parameter is required.
 
 `split.key` (v1), `splitKey` (v2)::
 The routing key prefix.
-For example, if the uniqueKey of a document is "a!123", then you would use `split.key=a!`. This parameter is required.
+For example, if the uniqueKey of a document is "a!123", then you would use `split.key=a!`.
+This parameter is required.
 
 `forward.timeout` (v1), `forwardTimeout` (v2)::
 The timeout, in seconds, until which write requests made to the source collection for the given `split.key` will be forwarded to the target shard.
@@ -851,7 +853,8 @@ Such incompatibilities may result from incompatible schema changes or after migr
 === COLSTATUS Parameters
 
 `collection`::
-Collection name (optional). If missing then it means all collections.
+Collection name (optional).
+If missing, the command reports on all collections.
 
 `coreInfo`::
 Optional boolean.
@@ -1704,7 +1707,8 @@ Values \<=0 are use the default value Integer.MAX_VALUE.
 When this number is reached, the process waits for one or more leaders to be successfully assigned before adding more to the queue.
 
 `maxWaitSeconds`::
-Defaults to `60`. This is the timeout value when waiting for leaders to be reassigned.
+Defaults to `60`.
+This is the timeout value when waiting for leaders to be reassigned.
 If `maxAtOnce` is less than the number of reassignments that will take place, this is the maximum interval for any _single_ wait for at least one reassignment.
 +
 For example, if 10 reassignments are to take place and `maxAtOnce` is `1` and `maxWaitSeconds` is `60`, the upper bound on the time that the command may wait is 10 minutes.
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index 03c778c..a2404a5 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -17,7 +17,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-A SolrCloud cluster includes a number of components. The Collections API is provided to allow you to control your cluster, including the collections, shards, replicas, backups, leader election, and other operations needs.
+A SolrCloud cluster includes a number of components.
+The Collections API is provided to allow you to control your cluster, including the collections, shards, replicas, backups, leader election, and other operational needs.
 
 Because this API has a large number of commands and options, we've grouped the commands into the following sub-sections:
 
@@ -33,9 +34,12 @@ Because this API has a large number of commands and options, we've grouped the c
 
 == Asynchronous Calls
 
-Since some collection API calls can be long running tasks (such as SPLITSHARD), you can optionally have the calls run asynchronously. Specifying `async=<request-id>` enables you to make an asynchronous call, the status of which can be requested using the <<requeststatus,REQUESTSTATUS>> call at any time.
+Since some collection API calls can be long running tasks (such as SPLITSHARD), you can optionally have the calls run asynchronously.
+Specifying `async=<request-id>` enables you to make an asynchronous call, the status of which can be requested using the <<requeststatus,REQUESTSTATUS>> call at any time.
 
-As of now, REQUESTSTATUS does not automatically clean up the tracking data structures, meaning the status of completed or failed tasks stays stored in ZooKeeper unless cleared manually. DELETESTATUS can be used to clear the stored statuses. However, there is a limit of 10,000 on the number of async call responses stored in a cluster.
+As of now, REQUESTSTATUS does not automatically clean up the tracking data structures, meaning the status of completed or failed tasks stays stored in ZooKeeper unless cleared manually.
+DELETESTATUS can be used to clear the stored statuses.
+However, there is a limit of 10,000 on the number of async call responses stored in a cluster.
 
 === Examples of Async Requests
 
@@ -93,7 +97,8 @@ curl -X POST http://localhost:8983/api/collections/collection1/shards -H 'Conten
 [[requeststatus]]
 == REQUESTSTATUS: Request Status of an Async Call
 
-Request the status and response of an already submitted <<Asynchronous Calls,Asynchronous Collection API>> (below) call. This call is also used to clear up the stored statuses.
+Request the status and response of an already submitted <<Asynchronous Calls,Asynchronous Collection API>> (above) call.
+This call is also used to clear up the stored statuses.
 
 [.dynamic-tabs]
 --
@@ -122,7 +127,9 @@ curl -X GET http://localhost:8983/api/cluster/command-status/1000
 === REQUESTSTATUS Parameters
 
 `requestid`::
-The user defined request ID for the request. This can be used to track the status of the submitted asynchronous task. This parameter is required.
+The user defined request ID for the request.
+This can be used to track the status of the submitted asynchronous task.
+This parameter is required.
 
 === Examples using REQUESTSTATUS
 
diff --git a/solr/solr-ref-guide/src/commits-transaction-logs.adoc b/solr/solr-ref-guide/src/commits-transaction-logs.adoc
index cf5903c..1d5c262 100644
--- a/solr/solr-ref-guide/src/commits-transaction-logs.adoc
+++ b/solr/solr-ref-guide/src/commits-transaction-logs.adoc
@@ -216,7 +216,8 @@ One point of confusion is how much data is contained in a transaction log.
 A tlog does not contain all documents, only the ones since the last hard commit.
 Older transaction log files are deleted when no longer needed.
 
-WARNING: Implicit in the above is that transaction logs will grow forever if hard commits are disabled. Therefore it is important that hard commits be enabled when indexing.
+WARNING: Implicit in the above is that transaction logs will grow forever if hard commits are disabled.
+Therefore it is important that hard commits be enabled when indexing.
 
 === Transaction Log Configuration
 
@@ -265,7 +266,7 @@ The number of update records to keep per log.
 |Optional |Default: `10`
 |===
 +
-The maximum number of logs keep. The default is `10`.
+The maximum number of logs to keep.
 
 `numVersionBuckets`::
 +
diff --git a/solr/solr-ref-guide/src/common-query-parameters.adoc b/solr/solr-ref-guide/src/common-query-parameters.adoc
index 62627a4..314fc2a 100644
--- a/solr/solr-ref-guide/src/common-query-parameters.adoc
+++ b/solr/solr-ref-guide/src/common-query-parameters.adoc
@@ -79,7 +79,8 @@ Users looking to avoid this behavior can add an additional sort criteria on a un
 
 When specified, the `start` parameter specifies an offset into a query's result set and instructs Solr to begin displaying results from this offset.
 
-The default value is `0`. In other words, by default, Solr returns results without an offset, beginning where the results themselves begin.
+The default value is `0`.
+In other words, by default, Solr returns results without an offset, beginning where the results themselves begin.
 
 Setting the `start` parameter to some other number, such as `3`, causes Solr to skip over the preceding records and start at the document identified by the offset.
 
@@ -91,7 +92,8 @@ For example, if the `rows` parameter is set to 10, you could display three succe
 You can use the `rows` parameter to paginate results from a query.
 The parameter specifies the maximum number of documents from the complete result set that Solr should return to the client at one time.
 
-The default value is `10`. That is, by default, Solr returns 10 documents at a time in response to a query.
+The default value is `10`.
+That is, by default, Solr returns 10 documents at a time in response to a query.
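+
+As a quick illustration (the query itself is hypothetical), a request for the third page of ten results combines the two parameters like this:
+
+[source,text]
+----
+q=ipod&start=20&rows=10
+----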
 
 == canCancel Parameter
 
@@ -453,7 +455,8 @@ q=quick brown fox&minExactCount=100&rows=10
     "docs": [{"doc1"}]
 }
 ----
-Since `numFoundExact=false`, we know the number of documents matching the query is greater or equal to 153. If we specify a higher value for `minExactCount`:
+Since `numFoundExact=false`, we know the number of documents matching the query is greater than or equal to 153.
+If we specify a higher value for `minExactCount`:
 
 [source,text]
 q=quick brown fox&minExactCount=200&rows=10
diff --git a/solr/solr-ref-guide/src/computational-geometry.adoc b/solr/solr-ref-guide/src/computational-geometry.adoc
index 50d7ac6..3be20a0 100644
--- a/solr/solr-ref-guide/src/computational-geometry.adoc
+++ b/solr/solr-ref-guide/src/computational-geometry.adoc
@@ -21,29 +21,26 @@ This section of the math expressions user guide covers computational geometry fu
 
 == Convex Hull
 
-A convex hull is the smallest convex set of points that encloses a data set. Math expressions has support for computing
-the convex hull of a 2D data set. Once a convex hull has been calculated, a set of math expression functions
+A convex hull is the smallest convex set of points that encloses a data set.
+Math expressions include support for computing the convex hull of a 2D data set.
+Once a convex hull has been calculated, a set of math expression functions
 can be applied to geometrically describe and visualize the convex hull.
 
 === Visualization
 
-The `convexHull` function can be used to visualize a border around a
-set of 2D points. Border visualizations can be useful for understanding where data points are
-in relation to the border.
+The `convexHull` function can be used to visualize a border around a set of 2D points.
+Border visualizations can be useful for understanding where data points are in relation to the border.
 
-In the examples below the `convexHull` function is used
-to visualize a border for a set of latitude and longitude points of rat sightings in the NYC311
-complaints database. An investigation of the border around the rat sightings can be done
-to better understand how rats may be entering or exiting the specific region.
+In the examples below the `convexHull` function is used to visualize a border for a set of latitude and longitude points of rat sightings in the NYC311
+complaints database.
+An investigation of the border around the rat sightings can be done to better understand how rats may be entering or exiting the specific region.
 
 ==== Scatter Plot
 
 Before visualizing the convex hull it's often useful to visualize the 2D points as a scatter plot.
 
-In this example the `random` function draws a sample of records from the NYC311 (complaints database) collection where
-the complaint description matches "rat sighting" and the zip code is 11238. The latitude and longitude fields
-are then vectorized and plotted as a scatter plot with longitude on x-axis and latitude on the
-y-axis.
+In this example the `random` function draws a sample of records from the NYC311 (complaints database) collection where the complaint description matches "rat sighting" and the zip code is 11238.
+The latitude and longitude fields are then vectorized and plotted as a scatter plot with longitude on the x-axis and latitude on the y-axis.
 
 image::images/math-expressions/convex0.png[]
 
@@ -51,10 +48,10 @@ Notice from the scatter plot that many of the points appear to lie near the bord
 
 ==== Convex Hull Plot
 
-The `convexHull` function can be used to visualize the border. The example uses the same points
-drawn from the NYC311 database. But instead of plotting the points directly the latitude and
-longitude points are added as rows to a matrix. The matrix is then transposed with `transpose`
-function so that each row of the matrix contains a single latitude and longitude point.
+The `convexHull` function can be used to visualize the border.
+The example uses the same points drawn from the NYC311 database.
+But instead of plotting the points directly the latitude and longitude points are added as rows to a matrix.
+The matrix is then transposed with the `transpose` function so that each row of the matrix contains a single latitude and longitude point.
 
 The `convexHull` function is then used to calculate the convex hull for the matrix of points.
 The convex hull is set to a variable called `hull`.
@@ -62,49 +59,44 @@ The convex hull is set a variable called `hull`.
 Once the convex hull has been created the `getVertices` function can be used to
 retrieve the matrix of points in the scatter plot that comprise the convex border around the scatter plot.
 The `colAt` function can then be used to retrieve the latitude and longitude vectors from the matrix
-so they can visualized by the `zplot` function. In the example below the convex hull points are
-visualized as a scatter plot.
+so they can be visualized by the `zplot` function.
+In the example below the convex hull points are visualized as a scatter plot.
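+
+A rough sketch of the kind of expression behind this visualization is shown below; the collection name, query, field names, and `colAt` column indices are illustrative assumptions.
+
+[source,text]
+----
+let(a=random(nyc311, q="complaint_desc_t:rat AND zip_s:11238", rows="500", fl="lat_d, lon_d"),
+    lat=col(a, lat_d),
+    lon=col(a, lon_d),
+    m=transpose(matrix(lat, lon)),
+    hull=convexHull(m),
+    vertices=getVertices(hull),
+    x=colAt(vertices, 1),
+    y=colAt(vertices, 0),
+    zplot(x=x, y=y))
+----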
 
 image::images/math-expressions/hullplot.png[]
 
-Notice that the 15 points in the scatter plot describe that latitude and longitude points of the
-convex hull.
+Notice that the 15 points in the scatter plot describe the latitude and longitude points of the convex hull.
 
 ==== Projecting and Clustering
 
-The once a convex hull as been calculated the `projectToBorder` can then be used to project
-points to the nearest point on the border. In the example below the `projectToBorder` function
-is used to project the original scatter scatter plot points to the nearest border.
+Once a convex hull has been calculated, the `projectToBorder` function can then be used to project points to the nearest point on the border.
+In the example below the `projectToBorder` function is used to project the original scatter plot points to the nearest border point.
 
-The `projectToBorder` function returns a matrix of lat/lon points for the border projections. In
-the example the matrix of border points is then clustered into 7 clusters using kmeans clustering.
+The `projectToBorder` function returns a matrix of lat/lon points for the border projections.
+In the example the matrix of border points is then clustered into 7 clusters using kmeans clustering.
 The `zplot` function is then used to plot the clustered border points.
 
 image::images/math-expressions/convex1.png[]
 
-Notice in the visualization its easy to see which spots along the border have the highest
-density of points. In the case or the rat sightings this information is useful in understanding
-which border points are closest for the rats to enter or exit from.
+Notice in the visualization it's easy to see which spots along the border have the highest density of points.
+In the case of the rat sightings this information is useful in understanding which border points are closest for the rats to enter or exit from.
 
 ==== Plotting the Centroids
 
 Once the border points have been clustered it's very easy to extract the centroids of the clusters
-and plot them on a map. The example below extracts the centroids from the clusters using the
-`getCentroids` function. `getCentroids` returns the matrix of lat/lon points which represent
-the centroids of border clusters. The `colAt` function can then be used to extract the lat/lon
-vectors so they can be plotted on a map using `zplot`.
+and plot them on a map.
+The example below extracts the centroids from the clusters using the `getCentroids` function.
+`getCentroids` returns the matrix of lat/lon points which represent the centroids of border clusters.
+The `colAt` function can then be used to extract the lat/lon vectors so they can be plotted on a map using `zplot`.
 
 image::images/math-expressions/convex2.png[]
 
-The map above shows the centroids of the border clusters. The centroids from the highest
-density clusters can now be zoomed and investigated geo-spatially to determine what might be
-the best places to begin an investigation of the border.
+The map above shows the centroids of the border clusters.
+The centroids from the highest density clusters can now be zoomed and investigated geo-spatially to determine what might be the best places to begin an investigation of the border.
 
 == Enclosing Disk
 
 The `enclosingDisk` function finds the smallest enclosing circle that encloses a 2D data set.
-Once an enclosing disk has been calculated, a set of math expression functions
-can be applied to geometrically describe the enclosing disk.
+Once an enclosing disk has been calculated, a set of math expression functions can be applied to geometrically describe the enclosing disk.
 
 In the example below an enclosing disk is calculated for a randomly generated set of 1000 2D observations.
 
diff --git a/solr/solr-ref-guide/src/config-api.adoc b/solr/solr-ref-guide/src/config-api.adoc
index 258f308..d2410a5 100644
--- a/solr/solr-ref-guide/src/config-api.adoc
+++ b/solr/solr-ref-guide/src/config-api.adoc
@@ -893,7 +893,8 @@ If `params.json` is modified, the params object is just updated without a core r
 
 === Empty Command
 
-If an empty command is sent to the `/config` endpoint, the watch is triggered on all cores using this configset. For example:
+If an empty command is sent to the `/config` endpoint, the watch is triggered on all cores using this configset.
+For example:
 
 [.dynamic-tabs]
 --
diff --git a/solr/solr-ref-guide/src/configsets-api.adoc b/solr/solr-ref-guide/src/configsets-api.adoc
index c0d9a9c..8b85228 100644
--- a/solr/solr-ref-guide/src/configsets-api.adoc
+++ b/solr/solr-ref-guide/src/configsets-api.adoc
@@ -19,7 +19,8 @@
 
 The Configsets API enables you to upload new configsets to ZooKeeper and to create and delete configsets when Solr is running in SolrCloud mode.
 
-Configsets are a collection of configuration files such as `solrconfig.xml`, `synonyms.txt`, the schema, language-specific files, and other collection-level configuration files (everything that normally lives in the `conf` directory). Solr ships with two example configsets (`_default` and `sample_techproducts_configs`) which can be used when creating collections.
+Configsets are a collection of configuration files such as `solrconfig.xml`, `synonyms.txt`, the schema, language-specific files, and other collection-level configuration files (everything that normally lives in the `conf` directory).
+Solr ships with two example configsets (`_default` and `sample_techproducts_configs`) which can be used when creating collections.
 Using the same concept, you can create your own configsets and make them available when creating collections.
 
 This API provides a way to upload configuration files to ZooKeeper and share the same set of configuration files between two or more collections.
diff --git a/solr/solr-ref-guide/src/configuring-solr-xml.adoc b/solr/solr-ref-guide/src/configuring-solr-xml.adoc
index 0e0f74a..68eea5f 100644
--- a/solr/solr-ref-guide/src/configuring-solr-xml.adoc
+++ b/solr/solr-ref-guide/src/configuring-solr-xml.adoc
@@ -308,7 +308,8 @@ If you would like to customize the metrics for your installation, see the sectio
 == Substituting JVM System Properties in solr.xml
 
 Solr supports variable substitution of JVM system property values in `solr.xml`, which allows runtime specification of various configuration options.
-The syntax is `${propertyname[:option default value]}`. This allows defining a default that can be overridden when Solr is launched.
+The syntax is `${propertyname[:option default value]}`.
+This allows defining a default that can be overridden when Solr is launched.
 If a default value is not specified, then the property must be specified at runtime or the `solr.xml` file will generate an error when parsed.
 
 Any JVM system properties, usually specified using the `-D` flag when starting the JVM, can be used as variables in the `solr.xml` file.
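+
+For example, a hypothetical entry in `solr.xml` such as `<int name="socketTimeout">${socketTimeout:600000}</int>` uses the value of the `socketTimeout` system property if it is set, and falls back to `600000` otherwise.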
diff --git a/solr/solr-ref-guide/src/copy-fields.adoc b/solr/solr-ref-guide/src/copy-fields.adoc
index 2404b70..c51f6c4 100644
--- a/solr/solr-ref-guide/src/copy-fields.adoc
+++ b/solr/solr-ref-guide/src/copy-fields.adoc
@@ -33,7 +33,8 @@ In the example above, if the `text` destination field has data of its own in the
 Remember to configure your fields as `multiValued="true"` if they will ultimately get multiple values (either from a multivalued source or from multiple `copyField` directives).
 
 A common usage for this functionality is to create a single "search" field that will serve as the default query field when users or clients do not specify a field to query.
-For example, `title`, `author`, `keywords`, and `body` may all be fields that should be searched by default, with copy field rules for each field to copy to a `catchall` field (for example, it could be named anything). Later you can set a rule in `solrconfig.xml` to search the `catchall` field by default.
+For example, `title`, `author`, `keywords`, and `body` may all be fields that should be searched by default, with copy field rules for each field to copy to a `catchall` field (which could be named anything).
+Later you can set a rule in `solrconfig.xml` to search the `catchall` field by default.
 One caveat to this is your index will grow when using copy fields.
 However, whether this becomes problematic for you and the final size will depend on the number of fields being copied, the number of destination fields being copied to, the analysis in use, and the available disk space.
 
diff --git a/solr/solr-ref-guide/src/coreadmin-api.adoc b/solr/solr-ref-guide/src/coreadmin-api.adoc
index 5900191..5f34f15 100644
--- a/solr/solr-ref-guide/src/coreadmin-api.adoc
+++ b/solr/solr-ref-guide/src/coreadmin-api.adoc
@@ -85,7 +85,8 @@ curl -X GET http://localhost:8983/api/cores?indexInfo=false
 === STATUS Parameters
 
 `core`::
-The name of a core, as listed in the "name" attribute of a `<core>` element in `solr.xml`. This parameter is required in v1, and part of the url in the v2 API.
+The name of a core, as listed in the "name" attribute of a `<core>` element in `solr.xml`.
+This parameter is required in v1, and is part of the URL in the v2 API.
 
 `indexInfo`::
 If `false`, information about the index will not be returned with a core STATUS request.
diff --git a/solr/solr-ref-guide/src/curve-fitting.adoc b/solr/solr-ref-guide/src/curve-fitting.adoc
index 966e888..d5a79ed 100644
--- a/solr/solr-ref-guide/src/curve-fitting.adoc
+++ b/solr/solr-ref-guide/src/curve-fitting.adoc
@@ -20,84 +20,68 @@ These functions support constructing a curve through bivariate non-linear data.
 
 == Polynomial Curve Fitting
 
-The `polyfit` function is a general purpose curve fitter used to model
-the non-linear relationship between two random variables.
+The `polyfit` function is a general purpose curve fitter used to model the non-linear relationship between two random variables.
 
 The `polyfit` function is passed x- and y-axes and fits a smooth curve to the data.
-If only a single array is provided it is treated as the y-axis and a sequence is generated
-for the x-axis. A third parameter can be added that specifies the degree of the polynomial. If the degree is
-not provided a 3 degree polynomial is used by default. The higher
-the degree the more curves that can be modeled.
+If only a single array is provided it is treated as the y-axis and a sequence is generated for the x-axis.
+A third parameter can be added that specifies the degree of the polynomial.
+If the degree is not provided, a third-degree polynomial is used by default.
+The higher the degree, the more complex the curves that can be modeled.
 
-The `polyfit` function can be visualized in a similar manner to linear regression with
-Zeppelin-Solr.
+The `polyfit` function can be visualized in a similar manner to linear regression with Zeppelin-Solr.
 
-The example below uses the `polyfit` function to fit a non-linear curve to a scatter
-plot of a random sample. The blue points are the scatter plot of the original observations and the red points
-are the predicted curve.
+The example below uses the `polyfit` function to fit a non-linear curve to a scatter plot of a random sample.
+The blue points are the scatter plot of the original observations and the red points are the predicted curve.
 
 image::images/math-expressions/polyfit.png[]
 
-In the example above a random sample containing two fields, `filesize_d`
-and `response_d`, is drawn from the `logs` collection.
+In the example above a random sample containing two fields, `filesize_d` and `response_d`, is drawn from the `logs` collection.
 The two fields are vectorized and set to the variables `x` and `y`.
 
-Then the `polyfit` function is used to fit a non-linear model to the data using a 5 degree
-polynomial. The `polyfit` function returns a model that is then directly plotted
-by `zplot` along with the original observations.
+Then the `polyfit` function is used to fit a non-linear model to the data using a fifth-degree polynomial.
+The `polyfit` function returns a model that is then directly plotted by `zplot` along with the original observations.
 
-The fitted model can also be used
-by the `predict` function in the same manner as linear regression. The example below
-uses the fitted model to predict a response time for a file size of 42000.
+The fitted model can also be used by the `predict` function in the same manner as linear regression.
+The example below uses the fitted model to predict a response time for a file size of 42000.
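+
+A rough sketch of an expression for this fit-and-predict workflow is shown below; the collection and field names come from the narrative above, while the query and sample size are assumptions.
+
+[source,text]
+----
+let(a=random(logs, q="*:*", rows="500", fl="filesize_d, response_d"),
+    x=col(a, filesize_d),
+    y=col(a, response_d),
+    curve=polyfit(x, y, 5),
+    predict(curve, 42000))
+----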
 
 image::images/math-expressions/polyfit-predict.png[]
 
 If an array of predictor values is provided an array of predictions will be returned.
 
-The `polyfit` model performs both *interpolation* and *extrapolation*,
-which means that it can predict results both within the bounds of the data set
-and beyond the bounds.
+The `polyfit` model performs both *interpolation* and *extrapolation*, which means that it can predict results both within the bounds of the data set and beyond the bounds.
 
 === Residuals
 
-The residuals can be calculated and visualized in the same manner as linear
-regression as well. In the example below the `ebeSubtract` function is used
-to subtract the fitted model from the observed values, to
-calculate a vector of residuals. The residuals are then plotted in a *residual plot*
-with the predictions along the x-axis and the model error on the y-axis.
+The residuals can be calculated and visualized in the same manner as linear regression as well.
+In the example below the `ebeSubtract` function is used to subtract the fitted model from the observed values, to calculate a vector of residuals.
+The residuals are then plotted in a *residual plot* with the predictions along the x-axis and the model error on the y-axis.
 
 image::images/math-expressions/polyfit-resid.png[]
 
 
 == Gaussian Curve Fitting
 
-The `gaussfit` function fits a smooth curve through a Gaussian peak. The `gaussfit`
-function takes an x- and y-axis and fits a smooth gaussian curve to the data. If
-only one vector of numbers is passed, `gaussfit` will treat it as the y-axis
-and will generate a sequence for the x-axis.
+The `gaussfit` function fits a smooth curve through a Gaussian peak.
+The `gaussfit` function takes an x- and y-axis and fits a smooth gaussian curve to the data.
+If only one vector of numbers is passed, `gaussfit` will treat it as the y-axis and will generate a sequence for the x-axis.
 
-One of the interesting use cases for `gaussfit` is to visualize how well a regression
-model's residuals fit a normal distribution.
+One of the interesting use cases for `gaussfit` is to visualize how well a regression model's residuals fit a normal distribution.
 
 One of the characteristics of a well-fit regression model is that its residuals will ideally fit a normal distribution.
-We can
-test this by building a histogram of the residuals and then fitting a gaussian curve to the curve of the histogram.
+We can test this by building a histogram of the residuals and then fitting a gaussian curve to the histogram.
 
-In the example below the residuals from a `polyfit` regression are modeled with the
-`hist` function to return a histogram with 32 bins. The `hist` function returns
-a list of tuples with statistics about each bin. In the example the `col` function is
-used to return a vector with the `N` column for each bin, which is the count of
-observations in the
-bin. If the residuals are normally distributed we would expect the bin counts
-to roughly follow a gaussian curve.
+In the example below the residuals from a `polyfit` regression are modeled with the `hist` function to return a histogram with 32 bins.
+The `hist` function returns a list of tuples with statistics about each bin.
+In the example the `col` function is used to return a vector with the `N` column for each bin, which is the count of observations in the bin.
+If the residuals are normally distributed we would expect the bin counts to roughly follow a gaussian curve.
 
-The bin count vector is then passed to `gaussfit` as the y-axis. `gaussfit` generates
-a sequence for the x-axis and then fits the gaussian curve to data.
+The bin count vector is then passed to `gaussfit` as the y-axis.
+`gaussfit` generates a sequence for the x-axis and then fits the gaussian curve to data.
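+
+A sketch of this workflow, carrying over the assumed sample and `polyfit` model from the earlier sketch and computing the residuals with `ebeSubtract` as in the residual plot above, might look like:
+
+[source,text]
+----
+let(a=random(logs, q="*:*", rows="500", fl="filesize_d, response_d"),
+    x=col(a, filesize_d),
+    y=col(a, response_d),
+    curve=polyfit(x, y, 5),
+    resid=ebeSubtract(y, curve),
+    h=hist(resid, 32),
+    counts=col(h, N),
+    gaussfit(counts))
+----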
 
-`zplot` is then used to plot the original bin counts and the fitted curve. In the
-example below, the blue line is the bin counts, and the smooth yellow line is the
-fitted curve. We can see that the binned residuals fit fairly well to a normal
-distribution.
+`zplot` is then used to plot the original bin counts and the fitted curve.
+In the example below, the blue line is the bin counts, and the smooth yellow line is the fitted curve.
+We can see that the binned residuals fit fairly well to a normal distribution.
 
 image::images/math-expressions/gaussfit.png[]
 
@@ -110,26 +94,23 @@ image::images/math-expressions/gaussfit2.png[]
 
 The `harmonicFit` function (or `harmfit`, for short) fits a smooth line through control points of a sine wave.
 The `harmfit` function is passed x- and y-axes and fits a smooth curve to the data.
-If a single array is provided it is treated as the y-axis and a sequence is generated
-for the x-axis.
+If a single array is provided it is treated as the y-axis and a sequence is generated for the x-axis.
 
-The example below shows `harmfit` fitting a single oscillation of a sine wave. The `harmfit` function
-returns the smoothed values at each control point. The return value is also a model which can be used by
-the `predict`, `derivative` and `integrate` functions.
-
-NOTE: The `harmfit` function works best when run on a single oscillation rather than a long sequence of
-oscillations. This is particularly true if the sine wave has noise. After the curve has been fit it can be
-extrapolated to any point in time in the past or future.
+The example below shows `harmfit` fitting a single oscillation of a sine wave.
+The `harmfit` function returns the smoothed values at each control point.
+The return value is also a model which can be used by the `predict`, `derivative` and `integrate` functions.
 
+NOTE: The `harmfit` function works best when run on a single oscillation rather than a long sequence of oscillations.
+This is particularly true if the sine wave has noise.
+After the curve has been fit it can be extrapolated to any point in time in the past or future.
 
 In the example below the original control points are shown in blue and the fitted curve is shown in yellow.
 
 image::images/math-expressions/harmfit.png[]
 
-
-The output of `harmfit` is a model that can be used by the `predict` function to interpolate and extrapolate
-the sine wave. In the example below the `natural` function creates an x-axis from 0 to 127
-used to predict results for the model. This extrapolates the sine wave out to 128 points, when
-the original model curve had only 19 control points.
+The output of `harmfit` is a model that can be used by the `predict` function to interpolate and extrapolate the sine wave.
+In the example below the `natural` function creates an x-axis from 0 to 127 that is used to predict results for the model.
+This extrapolates the sine wave out to 128 points, when the original model curve had only 19 control points.
 
 image::images/math-expressions/harmfit2.png[]
diff --git a/solr/solr-ref-guide/src/date-formatting-math.adoc b/solr/solr-ref-guide/src/date-formatting-math.adoc
index 8bd66c2..1753e27 100644
--- a/solr/solr-ref-guide/src/date-formatting-math.adoc
+++ b/solr/solr-ref-guide/src/date-formatting-math.adoc
@@ -31,7 +31,8 @@ The format used is a restricted form of the canonical representation of dateTime
 * `ss` is seconds.
 * `Z` is a literal 'Z' character indicating that this string representation of the date is in UTC
 
-Note that no time zone can be specified; the String representations of dates is always expressed in Coordinated Universal Time (UTC). Here is an example value:
+Note that no time zone can be specified; the String representation of dates is always expressed in Coordinated Universal Time (UTC).
+Here is an example value:
 
 `1972-05-20T17:33:18Z`
 
@@ -42,7 +43,8 @@ Here are example values with sub-seconds:
 * `1972-05-20T17:33:18.77Z`
 * `1972-05-20T17:33:18.7Z`
 
-There must be a leading `'-'` for dates prior to year 0000, and Solr will format dates with a leading `'+'` for years after 9999. Year 0000 is considered year 1 BC; there is no such thing as year 0 AD or BC.
+There must be a leading `'-'` for dates prior to year 0000, and Solr will format dates with a leading `'+'` for years after 9999.
+Year 0000 is considered year 1 BC; there is no such thing as year 0 AD or BC.
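
A hedged example of the syntax in use, assuming a collection named `mycollection` and a date field named `last_modified`:

[source,bash]
----
# Filter on an explicit UTC range; both endpoints use the canonical
# YYYY-MM-DDThh:mm:ssZ form described above.
curl 'http://localhost:8983/solr/mycollection/select' \
     --data-urlencode 'q=*:*' \
     --data-urlencode 'fq=last_modified:[1972-05-20T17:33:18Z TO 1999-12-31T23:59:59Z]'
----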
 
 .Query escaping may be required
 [WARNING]
@@ -62,7 +64,8 @@ These are valid queries: +
 
 Solr's `DateRangeField` supports the same point in time date syntax described above (with _date math_ described below) and more to express date ranges.
 One class of examples is truncated dates, which represent the entire date span to the precision indicated.
-The other class uses the range syntax (`[ TO ]`). Here are some examples:
+The other class uses the range syntax (`[ TO ]`).
+Here are some examples:
 
 * `2000-11` – The entire month of November, 2000.
 * `1605-11-05` – The Fifth of November.
@@ -185,6 +188,7 @@ fq={!field f=dateRange op=Contains}[2013 TO 2018]
 ----
 
 Unlike most local params, `op` is actually _not_ defined by any query parser (`field`), it is defined by the field type, in this case `DateRangeField`.
-In the above example, it would find documents with indexed ranges that _contain_ (or equals) the range 2013 thru 2018. Multi-valued overlapping indexed ranges in a document are effectively coalesced.
+In the above example, it would find documents with indexed ranges that _contain_ (or equal) the range 2013 through 2018.
+Multi-valued overlapping indexed ranges in a document are effectively coalesced.
 
 For a DateRangeField example use-case, see https://cwiki.apache.org/confluence/display/solr/DateRangeField[see Solr's community wiki].
diff --git a/solr/solr-ref-guide/src/de-duplication.adoc b/solr/solr-ref-guide/src/de-duplication.adoc
index 893c0c7..9d24cff 100644
--- a/solr/solr-ref-guide/src/de-duplication.adoc
+++ b/solr/solr-ref-guide/src/de-duplication.adoc
@@ -78,7 +78,8 @@ By default, all fields on the document will be used.
 
 `signatureField`::
 The name of the field used to hold the fingerprint/signature.
-The field should be defined in `schema.xml`. The default is `signatureField`.
+The field should be defined in your schema.
+The default is `signatureField`.
 
 `enabled`::
 Set to *false* to disable de-duplication processing.
diff --git a/solr/solr-ref-guide/src/dismax-query-parser.adoc b/solr/solr-ref-guide/src/dismax-query-parser.adoc
index 46c0c89..c164898 100644
--- a/solr/solr-ref-guide/src/dismax-query-parser.adoc
+++ b/solr/solr-ref-guide/src/dismax-query-parser.adoc
@@ -16,13 +16,22 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The DisMax query parser is designed to process simple phrases (without complex syntax) entered by users and to search for individual terms across several fields using different weighting (boosts) based on the significance of each field. Additional options enable users to influence the score based on rules specific to each use case (independent of user input).
+The DisMax query parser is designed to process simple phrases (without complex syntax) entered by users and to search for individual terms across several fields using different weighting (boosts) based on the significance of each field.
+Additional options enable users to influence the score based on rules specific to each use case (independent of user input).
 
-In general, the DisMax query parser's interface is more like that of Google than the interface of the 'lucene' Solr query parser. This similarity makes DisMax the appropriate query parser for many consumer applications. It accepts a simple syntax, and it rarely produces error messages.
+In general, the DisMax query parser's interface is more like that of Google than the interface of the 'lucene' Solr query parser.
+This similarity makes DisMax the appropriate query parser for many consumer applications.
+It accepts a simple syntax, and it rarely produces error messages.
 
-The DisMax query parser supports an extremely simplified subset of the Lucene QueryParser syntax. As in Lucene, quotes can be used to group phrases, and +/- can be used to denote mandatory and optional clauses. All other Lucene query parser special characters (except AND and OR) are escaped to simplify the user experience. The DisMax query parser takes responsibility for building a good query from the user's input using Boolean clauses containing DisMax queries across fields and boosts s [...]
+The DisMax query parser supports an extremely simplified subset of the Lucene QueryParser syntax.
+As in Lucene, quotes can be used to group phrases, and +/- can be used to denote mandatory and optional clauses.
+All other Lucene query parser special characters (except AND and OR) are escaped to simplify the user experience.
+The DisMax query parser takes responsibility for building a good query from the user's input using Boolean clauses containing DisMax queries across fields and boosts specified by the user.
+It also lets the Solr administrator provide additional boosting queries, boosting functions, and filtering queries to artificially affect the outcome of all searches.
+These options can all be specified as default parameters for the request handler in the `solrconfig.xml` file or overridden in the Solr query URL.
 
-Interested in the technical concept behind the DisMax name? DisMax stands for Maximum Disjunction. Here's a definition of a Maximum Disjunction or "DisMax" query:
+Interested in the technical concept behind the DisMax name? DisMax stands for Maximum Disjunction.
+Here's a definition of a Maximum Disjunction or "DisMax" query:
 
 [quote]
 ____
@@ -33,29 +42,37 @@ Whether or not you remember this explanation, do remember that the DisMax Query
 
 == DisMax Query Parser Parameters
 
-In addition to the common request parameters, highlighting parameters, and simple facet parameters, the DisMax query parser supports the parameters described below. Like the standard query parser, the DisMax query parser allows default parameter values to be specified in `solrconfig.xml`, or overridden by query-time values in the request.
+In addition to the common request parameters, highlighting parameters, and simple facet parameters, the DisMax query parser supports the parameters described below.
+Like the standard query parser, the DisMax query parser allows default parameter values to be specified in `solrconfig.xml`, or overridden by query-time values in the request.
 
 The sections below explain these parameters in detail.
 
 === q Parameter
 
-The `q` parameter defines the main "query" constituting the essence of the search. The parameter supports raw input strings provided by users with no special escaping. The + and - characters are treated as "mandatory" and "prohibited" modifiers for terms. Text wrapped in balanced quote characters (for example, "San Jose") is treated as a phrase. Any query containing an odd number of quote characters is evaluated as if there were no quote characters at all.
+The `q` parameter defines the main "query" constituting the essence of the search.
+The parameter supports raw input strings provided by users with no special escaping.
+The + and - characters are treated as "mandatory" and "prohibited" modifiers for terms.
+Text wrapped in balanced quote characters (for example, "San Jose") is treated as a phrase.
+Any query containing an odd number of quote characters is evaluated as if there were no quote characters at all.
 
 IMPORTANT: The `q` parameter does not support wildcard characters such as *.
 
 
 === q.alt Parameter
 
-If specified, the `q.alt` parameter defines a query (which by default will be parsed using standard query parsing syntax) when the main q parameter is not specified or is blank. The `q.alt` parameter comes in handy when you need something like a query to match all documents (don't forget `&rows=0` for that one!) in order to get collection-wide faceting counts.
+If specified, the `q.alt` parameter defines a query (which by default will be parsed using standard query parsing syntax) when the main q parameter is not specified or is blank.
+The `q.alt` parameter comes in handy when you need something like a query to match all documents (don't forget `&rows=0` for that one!) in order to get collection-wide faceting counts.
 
 
 === qf (Query Fields) Parameter
 
-The `qf` parameter introduces a list of fields, each of which is assigned a boost factor to increase or decrease that particular field's importance in the query. For example, the query below:
+The `qf` parameter introduces a list of fields, each of which is assigned a boost factor to increase or decrease that particular field's importance in the query.
+For example, the query below:
 
 `qf="fieldOne^2.3 fieldTwo fieldThree^0.4"`
 
-assigns `fieldOne` a boost of 2.3, leaves `fieldTwo` with the default boost (because no boost factor is specified), and `fieldThree` a boost of 0.4. These boost factors make matches in `fieldOne` much more significant than matches in `fieldTwo`, which in turn are much more significant than matches in `fieldThree`.
+assigns `fieldOne` a boost of 2.3, leaves `fieldTwo` with the default boost (because no boost factor is specified), and `fieldThree` a boost of 0.4.
+These boost factors make matches in `fieldOne` much more significant than matches in `fieldTwo`, which in turn are much more significant than matches in `fieldThree`.
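
As a hedged illustration, the same `qf` value in a complete request (the collection and field names are placeholders):

[source,bash]
----
# DisMax request searching three fields with different per-field boosts.
curl 'http://localhost:8983/solr/mycollection/select' \
     --data-urlencode 'defType=dismax' \
     --data-urlencode 'q=video player' \
     --data-urlencode 'qf=fieldOne^2.3 fieldTwo fieldThree^0.4'
----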
 
 
 === mm (Minimum Should Match) Parameter
@@ -80,10 +97,16 @@ The table below explains the various ways that mm values can be specified.
 
 When specifying `mm` values, keep in mind the following:
 
-* When dealing with percentages, negative values can be used to get different behavior in edge cases. 75% and -25% mean the same thing when dealing with 4 clauses, but when dealing with 5 clauses 75% means 3 are required, but -25% means 4 are required.
-* If the calculations based on the parameter arguments determine that no optional clauses are needed, the usual rules about Boolean queries still apply at search time. (That is, a Boolean query containing no required clauses must still match at least one optional clause).
-* No matter what number the calculation arrives at, Solr will never use a value greater than the number of optional clauses, or a value less than 1. In other words, no matter how low or how high the calculated result, the minimum number of required matches will never be less than 1 or greater than the number of clauses.
-* When searching across multiple fields that are configured with different query analyzers, the number of optional clauses may differ between the fields. In such a case, the value specified by mm applies to the maximum number of optional clauses. For example, if a query clause is treated as stopword for one of the fields, the number of optional clauses for that field will be smaller than for the other fields. A query with such a stopword clause would not return a match in that field if m [...]
+* When dealing with percentages, negative values can be used to get different behavior in edge cases.
+75% and -25% mean the same thing when dealing with 4 clauses, but when dealing with 5 clauses 75% means 3 are required, but -25% means 4 are required.
+* If the calculations based on the parameter arguments determine that no optional clauses are needed, the usual rules about Boolean queries still apply at search time.
+(That is, a Boolean query containing no required clauses must still match at least one optional clause).
+* No matter what number the calculation arrives at, Solr will never use a value greater than the number of optional clauses, or a value less than 1.
+In other words, no matter how low or how high the calculated result, the minimum number of required matches will never be less than 1 or greater than the number of clauses.
+* When searching across multiple fields that are configured with different query analyzers, the number of optional clauses may differ between the fields.
+In such a case, the value specified by mm applies to the maximum number of optional clauses.
+For example, if a query clause is treated as a stopword for one of the fields, the number of optional clauses for that field will be smaller than for the other fields.
+A query with such a stopword clause would not return a match in that field if mm is set to 100% because the removed clause does not count as matched.
 
 The default value of `mm` is 0% (all clauses optional), unless `q.op` is specified as "AND", in which case `mm` defaults to 100% (all clauses required).
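
A hedged example of `mm` as a percentage, with placeholder collection and field names:

[source,bash]
----
# With four optional clauses, mm=75% requires three of them to match.
curl 'http://localhost:8983/solr/mycollection/select' \
     --data-urlencode 'defType=dismax' \
     --data-urlencode 'q=solr relevance tuning guide' \
     --data-urlencode 'qf=title^2 body' \
     --data-urlencode 'mm=75%'
----
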
 
@@ -97,26 +120,33 @@ The format is the same as that used by the `qf` parameter: a list of fields and
 
 === ps (Phrase Slop) Parameter
 
-The `ps` parameter specifies the amount of "phrase slop" to apply to queries specified with the pf parameter. Phrase slop is the number of positions one token needs to be moved in relation to another token in order to match a phrase specified in a query.
+The `ps` parameter specifies the amount of "phrase slop" to apply to queries specified with the pf parameter.
+Phrase slop is the number of positions one token needs to be moved in relation to another token in order to match a phrase specified in a query.
 
 
 === qs (Query Phrase Slop) Parameter
 
-The `qs` parameter specifies the amount of slop permitted on phrase queries explicitly included in the user's query string with the `qf` parameter. As explained above, slop refers to the number of positions one token needs to be moved in relation to another token in order to match a phrase specified in a query.
+The `qs` parameter specifies the amount of slop permitted on phrase queries explicitly included in the user's query string with the `qf` parameter.
+As explained above, slop refers to the number of positions one token needs to be moved in relation to another token in order to match a phrase specified in a query.
 
 
 === The tie (Tie Breaker) Parameter
 
 The `tie` parameter specifies a float value (which should be something much less than 1) to use as tiebreaker in DisMax queries.
 
-When a term from the user's input is tested against multiple fields, more than one field may match. If so, each field will generate a different score based on how common that word is in that field (for each document relative to all other documents). The `tie` parameter lets you control how much the final score of the query will be influenced by the scores of the lower scoring fields compared to the highest scoring field.
+When a term from the user's input is tested against multiple fields, more than one field may match.
+If so, each field will generate a different score based on how common that word is in that field (for each document relative to all other documents).
+The `tie` parameter lets you control how much the final score of the query will be influenced by the scores of the lower scoring fields compared to the highest scoring field.
 
-A value of "0.0" - the default - makes the query a pure "disjunction max query": that is, only the maximum scoring subquery contributes to the final score. A value of "1.0" makes the query a pure "disjunction sum query" where it doesn't matter what the maximum scoring sub query is, because the final score will be the sum of the subquery scores. Typically a low value, such as 0.1, is useful.
+A value of "0.0" - the default - makes the query a pure "disjunction max query": that is, only the maximum scoring subquery contributes to the final score.
+A value of "1.0" makes the query a pure "disjunction sum query" where it doesn't matter what the maximum scoring sub query is, because the final score will be the sum of the subquery scores.
+Typically a low value, such as 0.1, is useful.
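
A hedged example using a small `tie` value, with placeholder collection and field names:

[source,bash]
----
# tie=0.1 lets lower-scoring fields add a small contribution on top of
# the highest-scoring field's score.
curl 'http://localhost:8983/solr/mycollection/select' \
     --data-urlencode 'defType=dismax' \
     --data-urlencode 'q=memory' \
     --data-urlencode 'qf=name^2 features' \
     --data-urlencode 'tie=0.1'
----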
 
 
 === bq (Boost Query) Parameter
 
-The `bq` parameter specifies an additional, optional, query clause that will be _added_ to the user's main query as optional clauses that will influence the score. For example, if you wanted to add a boost for documents that are in a particular category you could use:
+The `bq` parameter specifies an additional, optional, query clause that will be _added_ to the user's main query as optional clauses that will influence the score.
+For example, if you wanted to add a boost for documents that are in a particular category you could use:
 
 [source,text]
 ----
@@ -147,7 +177,8 @@ The only difference between the above examples, is that using the `bq` parameter
 [[bq-bf-shortcomings]]
 .Additive Boosts vs Multiplicative Boosts
 ====
-Generally speaking, using `bq` (or `bf`, below) is considered a poor way to "boost" documents by a secondary query because it has an "Additive" effect on the final score.  The overall impact a particular `bq` parameter will have on a given document can vary a lot depending on the _absolute_ values of the scores from the original query as well as the `bq` query, which in turn depends on the complexity of the original query, and various scoring factors (TF, IDF, average field length, etc.)
+Generally speaking, using `bq` (or `bf`, below) is considered a poor way to "boost" documents by a secondary query because it has an "Additive" effect on the final score.
+The overall impact a particular `bq` parameter will have on a given document can vary a lot depending on the _absolute_ values of the scores from the original query as well as the `bq` query, which in turn depends on the complexity of the original query, and various scoring factors (TF, IDF, average field length, etc.)
 
 "Multiplicative Boosting" is generally considered to be a more predictable method of influencing document score, because it acts as a "scaling factor" -- increasing (or decreasing) the scores of each document by a _relative_ amount.
 
@@ -157,7 +188,9 @@ The <<other-parsers.adoc#boost-query-parser,`{!boost}` QParser>> provides a conv
 
 === bf (Boost Functions) Parameter
 
-The `bf` parameter specifies functions (with optional <<standard-query-parser.adoc#boosting-a-term-with,query boost>>) that will be used to construct FunctionQueries which will be _added_ to the user's main query as optional clauses that will influence the score. Any <<function-queries.adoc#available-functions,function supported natively by Solr>> can be used, along with a boost value. For example:
+The `bf` parameter specifies functions (with optional <<standard-query-parser.adoc#boosting-a-term-with,query boost>>) that will be used to construct FunctionQueries which will be _added_ to the user's main query as optional clauses that will influence the score.
+Any <<function-queries.adoc#available-functions,function supported natively by Solr>> can be used, along with a boost value.
+For example:
 
 [source,text]
 ----
@@ -215,7 +248,9 @@ Another request handler is registered at "/instock" and has slightly different c
 
 `\http://localhost:8983/solr/techproducts/instock?defType=dismax&q=video&fl=name,score,inStock`
 
-One of the other really cool features in this parser is robust support for specifying the "BooleanQuery.minimumNumberShouldMatch" you want to be used based on how many terms are in your user's query. These allows flexibility for typos and partial matches. For the dismax parser, one and two word queries require that all of the optional clauses match, but for three to five word queries one missing word is allowed.
+One of the other really cool features in this parser is robust support for specifying the "BooleanQuery.minimumNumberShouldMatch" you want to be used based on how many terms are in your user's query.
+This allows flexibility for typos and partial matches.
+For the dismax parser, one and two word queries require that all of the optional clauses match, but for three to five word queries one missing word is allowed.
 
 `\http://localhost:8983/solr/techproducts/select?defType=dismax&q=belkin+ipod`
 
diff --git a/solr/solr-ref-guide/src/docker-faq.adoc b/solr/solr-ref-guide/src/docker-faq.adoc
index df90235..bbd0731 100644
--- a/solr/solr-ref-guide/src/docker-faq.adoc
+++ b/solr/solr-ref-guide/src/docker-faq.adoc
@@ -19,9 +19,8 @@
 == How do I persist Solr data and config?
 
 Your data is persisted already, in your container's filesystem.
-If you `docker run`, add data to Solr, then `docker stop` and later
-`docker start`, then your data is still there. The same is true for
-changes to configuration files.
+If you `docker run`, add data to Solr, then `docker stop` and later `docker start`, then your data is still there.
+The same is true for changes to configuration files.
 
 Equally, if you `docker commit` your container, you can later create a new
 container from that image, and that will have your data in it.
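
A minimal sketch of the stop/start case described above; the container name is arbitrary:

[source,bash]
----
docker run -d --name my_solr -p 8983:8983 solr
# ... index some documents, change some configuration ...
docker stop my_solr
docker start my_solr   # the indexed data and config changes are still present
----
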
@@ -49,13 +48,10 @@ This is useful if you want to inspect or modify the data in the Docker host
 when the container is not running, and later easily run new containers against that data.
 This is indeed possible, but there are a few gotchas.
 
-Solr stores its core data in the `server/solr` directory, in sub-directories
-for each core. The `server/solr` directory also contains configuration files
-that are part of the Solr distribution.
-Now, if we mounted volumes for each core individually, then that would
-interfere with Solr trying to create those directories. If instead we make
-the whole directory a volume, then we need to provide those configuration files
-in our volume, which we can do by copying them from a temporary container.
+Solr stores its core data in the `server/solr` directory, in sub-directories for each core.
+The `server/solr` directory also contains configuration files that are part of the Solr distribution.
+Now, if we mounted volumes for each core individually, then that would interfere with Solr trying to create those directories.
+If instead we make the whole directory a volume, then we need to provide those configuration files in our volume, which we can do by copying them from a temporary container.
 For example:
 
 [source,bash]
@@ -181,12 +177,14 @@ This is especially a problem for ZooKeeper 3.4.6; future versions are better at
 
 Docker 1.10 has a new `--ip` configuration option that allows you to specify an IP address for a container.
 It also has a `--ip-range` option that allows you to specify the range that other containers get addresses from.
-Used together, you can implement static addresses. See the <<docker-networking.adoc#,Solr Docker networking guide>> for more information.
+Used together, you can implement static addresses.
+See the <<docker-networking.adoc#,Solr Docker networking guide>> for more information.
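
A hedged sketch of that combination; the network name, addresses, and ZooKeeper image are assumptions:

[source,bash]
----
# Reserve part of the subnet for dynamic assignment, leaving the rest of
# the subnet free for containers started with an explicit --ip.
docker network create --subnet 192.168.22.0/24 --ip-range 192.168.22.128/25 netstatic
docker run -d --net netstatic --ip 192.168.22.10 --name zk1 zookeeper
----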
 
 == Can I run ZooKeeper and Solr with Docker Links?
 
 Docker's https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/[Legacy container links] provide a way to
-pass connection configuration between containers. It only works on a single machine, on the default bridge.
+pass connection configuration between containers.
+It only works on a single machine, on the default bridge.
 It provides no facilities for static IPs.
 Note: this feature is expected to be deprecated and removed in a future release.
 So really, see the "Can I run ZooKeeper and Solr clusters under Docker?" option above instead.
@@ -249,9 +247,8 @@ set by the `GC_TUNE` environment variable and further down the list the overridi
 
 == I'm confused about the different invocations of Solr -- help?
 
-The different invocations of the Solr docker image can look confusing, because the name of the
-image is "solr" and the Solr command is also "solr", and the image interprets various arguments in
-special ways. I'll illustrate the various invocations:
+The different invocations of the Solr docker image can look confusing, because the name of the image is "solr" and the Solr command is also "solr", and the image interprets various arguments in special ways.
+I'll illustrate the various invocations:
 
 To run an arbitrary command in the image:
 
@@ -270,8 +267,7 @@ To run the Solr server:
 docker run -it solr
 ----
 
-Here "solr" is the name of the image, and there is no specific command,
-so the image defaults to run the "solr" command with "-f" to run it in the foreground.
+Here "solr" is the name of the image, and there is no specific command, so the image defaults to run the "solr" command with "-f" to run it in the foreground.
 
 To run the Solr server with extra arguments:
 
@@ -290,32 +286,31 @@ To run solr as an arbitrary command:
 docker run -it solr solr zk --help
 ----
 
-Here the first "solr" is the image name, and the second "solr"
-is the "solr" command. The image runs the command exactly as specified;
-no "-f" is implicitly added. The container will print help text, and exit.
+Here the first "solr" is the image name, and the second "solr" is the "solr" command.
+The image runs the command exactly as specified; no "-f" is implicitly added.
+The container will print help text, and exit.
 
-If you find this visually confusing, it might be helpful to use more specific image tags,
-and specific command paths. For example:
+If you find this visually confusing, it might be helpful to use more specific image tags, and specific command paths.
+For example:
 
 [source,bash]
 ----
 docker run -it solr bin/solr -f -h myhostname
 ----
 
-Finally, the Solr docker image offers several commands that do some work before
-then invoking the Solr server, like "solr-precreate" and "solr-demo".
+Finally, the Solr docker image offers several commands that do some work before invoking the Solr server, like "solr-precreate" and "solr-demo".
 See the README.md for usage.
-These are implemented by the `docker-entrypoint.sh` script, and must be passed
-as the first argument to the image. For example:
+These are implemented by the `docker-entrypoint.sh` script, and must be passed as the first argument to the image.
+For example:
 
 [source,bash]
 ----
 docker run -it solr solr-demo
 ----
 
-It's important to understand an implementation detail here. The Dockerfile uses
-`solr-foreground` as the `CMD`, and the `docker-entrypoint.sh` implements
-that by by running "solr -f". So these two are equivalent:
+It's important to understand an implementation detail here.
+The Dockerfile uses `solr-foreground` as the `CMD`, and the `docker-entrypoint.sh` implements that by running "solr -f".
+So these two are equivalent:
 
 [source,bash]
 ----
@@ -330,13 +325,11 @@ whereas:
 docker run -it solr solr -f
 ----
 
-is slightly different: the "solr" there is a generic command, not treated in any
-special way by `docker-entrypoint.sh`. In particular, this means that the
-`docker-entrypoint-initdb.d` mechanism is not applied.
-So, if you want to use `docker-entrypoint-initdb.d`, then you must use one
-of the other two invocations.
-You also need to keep that in mind when you want to invoke solr from the bash
-command. For example, this does NOT run `docker-entrypoint-initdb.d` scripts:
+is slightly different: the "solr" there is a generic command, not treated in any special way by `docker-entrypoint.sh`.
+In particular, this means that the `docker-entrypoint-initdb.d` mechanism is not applied.
+So, if you want to use `docker-entrypoint-initdb.d`, then you must use one of the other two invocations.
+You also need to keep that in mind when you want to invoke solr from the bash command.
+For example, this does NOT run `docker-entrypoint-initdb.d` scripts:
 
 [source,bash]
 ----
diff --git a/solr/solr-ref-guide/src/docker-networking.adoc b/solr/solr-ref-guide/src/docker-networking.adoc
index 608cfc6..f0639df 100644
--- a/solr/solr-ref-guide/src/docker-networking.adoc
+++ b/solr/solr-ref-guide/src/docker-networking.adoc
@@ -30,10 +30,9 @@ NOTE: this example requires Docker 1.10.
 
 I'll run these commands from the first machine, trinity10.
 
-Create a network named "netzksolr" for this cluster. The `--ip-range` specifies the range of
-addresses to use for containers, whereas the `--subnet` specifies all possible addresses in this
-network. So effectively, addresses in the subnet but outside the range are reserved for containers
-that specifically use the `--ip` option.
+Create a network named "netzksolr" for this cluster.
+The `--ip-range` specifies the range of addresses to use for containers, whereas the `--subnet` specifies all possible addresses in this network.
+So effectively, addresses in the subnet but outside the range are reserved for containers that specifically use the `--ip` option.
 
 [source,bash]
 ----
@@ -233,7 +232,8 @@ docker port zksolrproxy 8002
 
 Or use a suitably configured HAProxy to round-robin between all Solr nodes. Or, instead of the overlay network, use http://www.projectcalico.org[Project Calico] and configure L3 routing so you do not need to mess with proxies.
 
-Now I can get to Solr on `http://trinity10:32774/solr/#/`. In the Cloud -> Tree -> /live_nodes view I see the Solr nodes.
+Now I can get to Solr on `http://trinity10:32774/solr/#/`.
+In the Cloud -> Tree -> /live_nodes view I see the Solr nodes.
 
 From the Solr UI select the collection1 core, and click on Cloud -> Graph to see how it has created
 two shards across our Solr nodes.
diff --git a/solr/solr-ref-guide/src/document-analysis.adoc b/solr/solr-ref-guide/src/document-analysis.adoc
index 2f60f12..e24ccaa 100644
--- a/solr/solr-ref-guide/src/document-analysis.adoc
+++ b/solr/solr-ref-guide/src/document-analysis.adoc
@@ -26,9 +26,13 @@
 The following sections describe how Solr breaks down and works with textual data.
 There are three main concepts to understand: analyzers, tokenizers, and filters.
 
-* <<analyzers.adoc#,Field analyzers>> are used both during ingestion, when a document is indexed, and at query time. An analyzer examines the text of fields and generates a token stream. Analyzers may be a single class or they may be composed of a series of tokenizer and filter classes.
+* <<analyzers.adoc#,Field analyzers>> are used both during ingestion, when a document is indexed, and at query time.
+An analyzer examines the text of fields and generates a token stream.
+Analyzers may be a single class or they may be composed of a series of tokenizer and filter classes.
 * <<tokenizers.adoc#,Tokenizers>> break field data into lexical units, or _tokens_.
-* <<filters.adoc#,Filters>> examine a stream of tokens and keep them, transform or discard them, or create new ones. Tokenizers and filters may be combined to form pipelines, or _chains_, where the output of one is input to the next. Such a sequence of tokenizers and filters is called an _analyzer_ and the resulting output of an analyzer is used to match query results or build indices.
+* <<filters.adoc#,Filters>> examine a stream of tokens and keep them, transform or discard them, or create new ones.
+Tokenizers and filters may be combined to form pipelines, or _chains_, where the output of one is input to the next.
+Such a sequence of tokenizers and filters is called an _analyzer_ and the resulting output of an analyzer is used to match query results or build indices.
 
 == Using Analyzers, Tokenizers, and Filters
 
diff --git a/solr/solr-ref-guide/src/document-transformers.adoc b/solr/solr-ref-guide/src/document-transformers.adoc
index 689fdf7..c6d3a8e 100644
--- a/solr/solr-ref-guide/src/document-transformers.adoc
+++ b/solr/solr-ref-guide/src/document-transformers.adoc
@@ -98,7 +98,8 @@ Augments each document with an inline explanation of its score exactly like the
 q=features:cache&fl=id,[explain style=nl]
 ----
 
-Supported values for `style` are `text`, `html`, and `nl` which returns the information as structured data. Here is the output of the above request using `style=nl`:
+Supported values for `style` are `text`, `html`, and `nl`, which returns the information as structured data.
+Here is the output of the above request using `style=nl`:
 
 [source,json]
 ----
@@ -125,7 +126,8 @@ A default style can be configured by specifying an `args` parameter in your `sol
 === [child] - ChildDocTransformerFactory
 
 
-This transformer returns all <<indexing-nested-documents.adoc#,descendant documents>> of each parent document matching your query.  This is useful when you have indexed nested child documents and want to retrieve the child documents for the relevant parent documents for any type of search query.
+This transformer returns all <<indexing-nested-documents.adoc#,descendant documents>> of each parent document matching your query.
+This is useful when you have indexed nested child documents and want to retrieve the child documents for the relevant parent documents for any type of search query.
 
 Note that this transformer can be used even when the query used to match the result documents is not a <<block-join-query-parser.adoc#,Block Join query>>.
 
@@ -138,20 +140,25 @@ q=book_title:Solr&fl=id,[child childFilter=doc_type:chapter limit=100]
 If the documents involved include a `\_nest_path_` field, then it is used to re-create the hierarchical structure of the descendent documents using the original pseudo-field names the documents were indexed with, otherwise the descendent documents are returned as a flat list of <<indexing-nested-documents#indexing-anonymous-children,anonymous children>>.
 
 `childFilter`::
-A query to filter which child documents should be included. This can be particularly useful when you have multiple levels of hierarchical documents. The default is all children.
+A query to filter which child documents should be included.
+This can be particularly useful when you have multiple levels of hierarchical documents.
+The default is all children.
 
 `limit`::
-The maximum number of child documents to be returned per parent document. The default is `10`.
+The maximum number of child documents to be returned per parent document.
+The default is `10`.
 
 `fl`::
-The field list which the transformer is to return. The default is the top level `fl`).
+The field list which the transformer is to return.
+The default is the top level `fl`.
 +
 There is a further limitation in which the fields here should be a subset of those specified by the top level `fl` parameter.
 
 `parentFilter`::
 Serves the same purpose as the `of`/`which` params in `{!child}`/`{!parent}` query parsers: to
 identify the set of "all parents" for the purpose of identifying the beginning & end of each
-nested document block.  This recently became fully optional and appears to be obsolete.
+nested document block.
+This recently became fully optional and appears to be obsolete.
 It is likely to be removed in a future Solr release, so _if you find it has some use, let the
 project know!_
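
A hedged request combining the parameters above, with placeholder collection, query, and filter values:

[source,bash]
----
# Return each matching book along with up to five of its chapter
# child documents.
curl 'http://localhost:8983/solr/mycollection/select' \
     --data-urlencode 'q=book_title:Solr' \
     --data-urlencode 'fl=id,[child childFilter=doc_type:chapter limit=5]'
----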
 
@@ -229,7 +236,8 @@ fl=id,[elevated],[excluded]&excludeIds=GB18030TEST&elevateIds=6H500F0&markExclud
 
 === [json] / [xml]
 
-These transformers replace a field value containing a string representation of a valid XML or JSON structure with the actual raw XML or JSON structure instead of just the string value. Each applies only to the specific writer, such that `[json]` only applies to `wt=json` and `[xml]` only applies to `wt=xml`.
+These transformers replace a field value containing a string representation of a valid XML or JSON structure with the actual raw XML or JSON structure instead of just the string value.
+Each applies only to the specific writer, such that `[json]` only applies to `wt=json` and `[xml]` only applies to `wt=xml`.
 
 [source,plain]
 ----
@@ -239,12 +247,14 @@ fl=id,source_s:[json]&wt=json
 
 === [subquery]
 
-This transformer executes a separate query per transforming document passing document fields as an input for subquery parameters. It's usually used with `{!join}` and `{!parent}` query parsers, and is intended to be an improvement for `[child]`.
+This transformer executes a separate query per transforming document passing document fields as an input for subquery parameters.
+It's usually used with `{!join}` and `{!parent}` query parsers, and is intended to be an improvement for `[child]`.
 
 * It must be given an unique name: `fl=*,children:[subquery]`
 * There might be a few of them, e.g., `fl=*,sons:[subquery],daughters:[subquery]`.
 * Every `[subquery]` occurrence adds a field into a result document with the given name, the value of this field is a document list, which is a result of executing subquery using document fields as an input.
-* Subquery will use the `/select` search handler by default, and will return an error if `/select` is not configured. This can be changed by supplying `foo.qt` parameter.
+* Subquery will use the `/select` search handler by default, and will return an error if `/select` is not configured.
+This can be changed by supplying `foo.qt` parameter.
 
 Here is how it looks like using various formats:
 
@@ -299,7 +309,8 @@ Here is how it looks like using various formats:
 
 To appear in the subquery document list, a field should be specified in both `fl` parameters: in the main `fl` (even though the main result documents do not have this field), and in the subquery's `fl` (e.g., `foo.fl`).
 
-Wildcards can be used in one or both of these parameters. For example, if field `title` should appear in categories subquery, it can be done via one of these ways:
+Wildcards can be used in one or both of these parameters.
+For example, if field `title` should appear in categories subquery, it can be done via one of these ways:
 
 [source,plain]
 ----
@@ -311,21 +322,25 @@ fl=...*,categories:[subquery]&categories.fl=*&categories.q=...
 
 ==== Subquery Parameters Shift
 
-If a subquery is declared as `fl=*,foo:[subquery]`, subquery parameters are prefixed with the given name and period. For example:
+If a subquery is declared as `fl=*,foo:[subquery]`, subquery parameters are prefixed with the given name and period.
+For example:
 
 [source,plain]
 q=*:*&fl=*,**foo**:[subquery]&**foo.**q=to be continued&**foo.**rows=10&**foo.**sort=id desc
 
 ==== Document Field as an Input for Subquery Parameters
 
-It's necessary to pass some document field values as a parameter for subquery. It's supported via an implicit *`row.__fieldname__`* parameter, and can be (but might not only) referred via local params syntax:
+It's often necessary to pass some document field values as parameters for the subquery.
+This is supported via an implicit *`row.__fieldname__`* parameter, which can be referenced, for example, via local params syntax:
 
 [source,plain,subs="quotes"]
 q=name:john&fl=name,id,depts:[subquery]&depts.q={!terms f=id **v=$row.dept_id**}&depts.rows=10
 
-Here departments are retrieved per every employee in search result. We can say that it's like SQL `join ON emp.dept_id=dept.id`.
+Here departments are retrieved per every employee in search result.
+We can say that it's like SQL `join ON emp.dept_id=dept.id`.
 
-Note, when a document field has multiple values they are concatenated with a comma by default. This can be changed with the local parameter `foo:[subquery separator=' ']`, this mimics *`{!terms}`* to work smoothly with it.
+Note that when a document field has multiple values, they are concatenated with a comma by default.
+This can be changed with the local parameter `foo:[subquery separator=' ']`, which mimics *`{!terms}`* so the two work smoothly together.
 
 To log substituted subquery request parameters, add the corresponding parameter names, as in: `depts.logParamsList=q,fl,rows,**row.dept_id**`
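
A hedged end-to-end form of the employees/departments example, with assumed collection and field names:

[source,bash]
----
# For every matching employee, run a per-document subquery that fetches
# the departments whose id appears in that employee's dept_id field.
curl 'http://localhost:8983/solr/emps/select' \
     --data-urlencode 'q=name:john' \
     --data-urlencode 'fl=name,id,depts:[subquery]' \
     --data-urlencode 'depts.q={!terms f=id v=$row.dept_id}' \
     --data-urlencode 'depts.rows=10' \
     --data-urlencode 'depts.logParamsList=q,fl,rows,row.dept_id'
----
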
 
@@ -351,11 +366,18 @@ Otherwise you'll get `NullPointerException` from `QueryComponent.mergeIds`.
 
 === [geo] - Geospatial formatter
 
-Formats spatial data from a spatial field using a designated format type name. Two inner parameters are required: `f` for the field name, and `w` for the format name. Example: `geojson:[geo f=mySpatialField w=GeoJSON]`.
+Formats spatial data from a spatial field using a designated format type name.
+Two inner parameters are required: `f` for the field name, and `w` for the format name.
+Example: `geojson:[geo f=mySpatialField w=GeoJSON]`.
 
-Normally you'll simply be consistent in choosing the format type you want by setting the `format` attribute on the spatial field type to `WKT` or `GeoJSON` – see the section <<spatial-search.adoc#,Spatial Search>> for more information. If you are consistent, it'll come out the way you stored it. This transformer offers a convenience to transform the spatial format to something different on retrieval.
+Normally you'll simply be consistent in choosing the format type you want by setting the `format` attribute on the spatial field type to `WKT` or `GeoJSON` – see the section <<spatial-search.adoc#,Spatial Search>> for more information.
+If you are consistent, it'll come out the way you stored it.
+This transformer offers a convenience to transform the spatial format to something different on retrieval.
 
-In addition, this feature is very useful with the `RptWithGeometrySpatialField` to avoid double-storage of the potentially large vector geometry. This transformer will detect that field type and fetch the geometry from an internal compact binary representation on disk (in docValues), and then format it as desired. As such, you needn't mark the field as stored, which would be redundant. In a sense this double-storage between docValues and stored-value storage isn't unique to spatial but w [...]
+In addition, this feature is very useful with the `RptWithGeometrySpatialField` to avoid double-storage of the potentially large vector geometry.
+This transformer will detect that field type and fetch the geometry from an internal compact binary representation on disk (in docValues), and then format it as desired.
+As such, you needn't mark the field as stored, which would be redundant.
+In a sense this double-storage between docValues and stored-value storage isn't unique to spatial but with polygonal geometry it can be a lot of data, and furthermore you'd like to avoid storing it in a verbose format (like GeoJSON or WKT).
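
A hedged request form of the inline example above, with a placeholder collection name:

[source,bash]
----
# Return each document's spatial field re-formatted as GeoJSON under a
# "geojson" pseudo-field.
curl 'http://localhost:8983/solr/mycollection/select' \
     --data-urlencode 'q=*:*' \
     --data-urlencode 'fl=id,geojson:[geo f=mySpatialField w=GeoJSON]'
----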
 
 
 === [features] - LTRFeatureLoggerTransformerFactory
diff --git a/solr/solr-ref-guide/src/documents-fields-schema-design.adoc b/solr/solr-ref-guide/src/documents-fields-schema-design.adoc
index 33f5ac5..b105683 100644
--- a/solr/solr-ref-guide/src/documents-fields-schema-design.adoc
+++ b/solr/solr-ref-guide/src/documents-fields-schema-design.adoc
@@ -44,7 +44,8 @@ A recipe document would contain the ingredients, the instructions, the preparati
 A document about a person, for example, might contain the person's name, biography, favorite color, and shoe size.
 A document about a book could contain the title, author, year of publication, number of pages, and so on.
 
-In the Solr universe, documents are composed of _fields_, which are more specific pieces of information. Shoe size could be a field.
+In the Solr universe, documents are composed of _fields_, which are more specific pieces of information.
+Shoe size could be a field.
 First name and last name could be fields.
 
 Fields can contain different kinds of data.
@@ -94,6 +95,6 @@ How will fields from documents be displayed to users?
 
 If you aren't sure yet, plan on some test indexing runs to see how the data in your documents is indexed with default settings.
 Build into your implementation plan some time for iteration and start small.
-The more you're able to define your schema before indexing all of your documents, the higher your chances for a successful search application for your users.  
+The more you're able to define your schema before indexing all of your documents, the higher your chances for a successful search application for your users.
 
 More information about the schema is in the section <<schema-elements.adoc#,Schema Elements>>.
diff --git a/solr/solr-ref-guide/src/documents-screen.adoc b/solr/solr-ref-guide/src/documents-screen.adoc
index 15fe607..fa527e1 100644
--- a/solr/solr-ref-guide/src/documents-screen.adoc
+++ b/solr/solr-ref-guide/src/documents-screen.adoc
@@ -36,11 +36,18 @@ There are other ways to load data, see also these sections:
 ====
 
 == Common Fields
-* Request-Handler: The first step is to define the RequestHandler. By default `/update` will be defined. Change the request handler to `/update/extract` to use Solr Cell.
-* Document Type: Select the Document Type to define the format of document to load. The remaining parameters may change depending on the document type selected.
-* Document(s): Enter a properly-formatted Solr document corresponding to the `Document Type` selected. XML and JSON documents must be formatted in a Solr-specific format, a small illustrative document will be shown. CSV files should have headers corresponding to fields defined in the schema. More details can be found at: <<indexing-with-update-handlers.adoc#,Indexing with Update Handlers>>.
+* Request-Handler: The first step is to define the RequestHandler.
+By default `/update` will be defined.
+Change the request handler to `/update/extract` to use Solr Cell.
+* Document Type: Select the Document Type to define the format of document to load.
+The remaining parameters may change depending on the document type selected.
+* Document(s): Enter a properly-formatted Solr document corresponding to the `Document Type` selected.
+XML and JSON documents must be formatted in a Solr-specific format; a small illustrative document will be shown.
+CSV files should have headers corresponding to fields defined in the schema.
+More details can be found at: <<indexing-with-update-handlers.adoc#,Indexing with Update Handlers>>.
 * Commit Within: Specify the number of milliseconds between the time the document is submitted and when it is available for searching.
-* Overwrite: If `true` the new document will replace an existing document with the same value in the `id` field. If `false` multiple documents with the same id can be added.
+* Overwrite: If `true` the new document will replace an existing document with the same value in the `id` field.
+If `false` multiple documents with the same id can be added.
 
 [TIP]
 ====
@@ -49,7 +56,9 @@ Setting `Overwrite` to `false` is very rare in production situations, the defaul
 
 == CSV, JSON and XML Documents
 
-When using these document types the functionality is similar to submitting documents via `curl` or similar. The document structure must be in a Solr-specific format appropriate for the document type. Examples are illustrated in the Document(s) text box when you select the various types.
+When using these document types the functionality is similar to submitting documents via `curl` or similar.
+The document structure must be in a Solr-specific format appropriate for the document type.
+Examples are illustrated in the Document(s) text box when you select the various types.
 
 These options will only add or overwrite documents; for other update tasks, see the <<Solr Command>> option.
 
@@ -59,14 +68,18 @@ The Document Builder provides a wizard-like interface to enter fields of a docum
 
 == File Upload
 
-The File Upload option allows choosing a prepared file and uploading it. If using `/update` for the Request-Handler option, you will be limited to XML, CSV, and JSON.
+The File Upload option allows choosing a prepared file and uploading it.
+If using `/update` for the Request-Handler option, you will be limited to XML, CSV, and JSON.
 
-Other document types (e.g., Word, PDF, etc.) can be indexed using the ExtractingRequestHandler (aka, Solr Cell). You must modify the RequestHandler to `/update/extract`, which must be defined in your `solrconfig.xml` file with your desired defaults. You should also add `&literal.id` shown in the "Extracting Request Handler Params" field so the file chosen is given a unique id.
+Other document types (e.g., Word, PDF, etc.) can be indexed using the ExtractingRequestHandler (aka, Solr Cell).
+You must modify the RequestHandler to `/update/extract`, which must be defined in your `solrconfig.xml` file with your desired defaults.
+You should also add `&literal.id` in the "Extracting Request Handler Params" field so the file chosen is given a unique id.
 More information can be found at: <<indexing-with-tika.adoc#,Indexing with Solr Cell and Apache Tika>>.
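
The same extraction can also be exercised outside the UI; a hedged `curl` sketch, assuming the `techproducts` collection and a local file named `example.pdf`:

[source,bash]
----
# Send a binary file to the extracting handler, assigning it a unique id.
curl 'http://localhost:8983/solr/techproducts/update/extract?literal.id=doc1&commit=true' \
     -F 'myfile=@example.pdf'
----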
 
 == Solr Command
 
-The Solr Command option allows you use the `/update` request handler with XML or JSON formatted commands to perform specific actions. A few examples are:
+The Solr Command option allows you to use the `/update` request handler with XML or JSON formatted commands to perform specific actions.
+A few examples are:
 
 * Deleting documents
 * Updating only certain fields of documents
diff --git a/solr/solr-ref-guide/src/docvalues.adoc b/solr/solr-ref-guide/src/docvalues.adoc
index ecf4a6f..e46d1c1 100644
--- a/solr/solr-ref-guide/src/docvalues.adoc
+++ b/solr/solr-ref-guide/src/docvalues.adoc
@@ -20,15 +20,22 @@ DocValues are a way of recording field values internally that is more efficient
 
 == Why DocValues?
 
-The standard way that Solr builds the index is with an _inverted index_. This style builds a list of terms found in all the documents in the index and next to each term is a list of documents that the term appears in (as well as how many times the term appears in that document). This makes search very fast - since users search by terms, having a ready list of term-to-document values makes the query process faster.
+The standard way that Solr builds the index is with an _inverted index_.
+This style builds a list of terms found in all the documents in the index and next to each term is a list of documents that the term appears in (as well as how many times the term appears in that document).
+This makes search very fast - since users search by terms, having a ready list of term-to-document values makes the query process faster.
 
-For other features that we now commonly associate with search, such as sorting, faceting, and highlighting, this approach is not very efficient. The faceting engine, for example, must look up each term that appears in each document that will make up the result set and pull the document IDs in order to build the facet list. In Solr, this is maintained in memory, and can be slow to load (depending on the number of documents, terms, etc.).
+For other features that we now commonly associate with search, such as sorting, faceting, and highlighting, this approach is not very efficient.
+The faceting engine, for example, must look up each term that appears in each document that will make up the result set and pull the document IDs in order to build the facet list.
+In Solr, this is maintained in memory, and can be slow to load (depending on the number of documents, terms, etc.).
 
-In Lucene 4.0, a new approach was introduced. DocValue fields are now column-oriented fields with a document-to-value mapping built at index time. This approach promises to relieve some of the memory requirements of the fieldCache and make lookups for faceting, sorting, and grouping much faster.
+In Lucene 4.0, a new approach was introduced.
+DocValue fields are now column-oriented fields with a document-to-value mapping built at index time.
+This approach promises to relieve some of the memory requirements of the fieldCache and make lookups for faceting, sorting, and grouping much faster.
 
 == Enabling DocValues
 
-To use docValues, you only need to enable it for a field that you will use it with. As with all schema design, you need to define a field type and then define fields of that type with docValues enabled. All of these actions are done in `schema.xml`.
+To use docValues, you only need to enable it for a field that you will use it with.
+As with all schema design, you need to define a field type and then define fields of that type with docValues enabled.
+All of these actions are done in `schema.xml`.
 
 Enabling a field for docValues only requires adding `docValues="true"` to the field (or field type) definition, as in this example from the `schema.xml` of Solr's `sample_techproducts_configs` <<config-sets.adoc#,configset>>:
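
If you prefer not to edit the schema file by hand, a hedged equivalent is to add the field through the Schema API (the collection, field, and field type names below are illustrative):

[source,bash]
----
# Add a docValues-enabled string field via the Schema API.
curl -X POST -H 'Content-Type: application/json' \
     'http://localhost:8983/solr/mycollection/schema' \
     --data-binary '{"add-field": {"name": "manu_exact", "type": "string", "docValues": true}}'
----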
 
@@ -40,24 +47,31 @@ Enabling a field for docValues only requires adding `docValues="true"` to the fi
 [IMPORTANT]
 If you have already indexed data into your Solr index, you will need to completely reindex your content after changing your field definitions in `schema.xml` in order to successfully use docValues.
 
-DocValues are only available for specific field types. The types chosen determine the underlying Lucene docValue type that will be used. The available Solr field types are:
+DocValues are only available for specific field types.
+The types chosen determine the underlying Lucene docValue type that will be used.
+The available Solr field types are:
 
 * `StrField`, and `UUIDField`:
 ** If the field is single-valued (i.e., multi-valued is false), Lucene will use the `SORTED` type.
-** If the field is multi-valued, Lucene will use the `SORTED_SET` type. Entries are kept in sorted order and duplicates are removed.
+** If the field is multi-valued, Lucene will use the `SORTED_SET` type.
+Entries are kept in sorted order and duplicates are removed.
 * `BoolField`:
 ** If the field is single-valued (i.e., multi-valued is false), Lucene will use the `SORTED` type.
-** If the field is multi-valued, Lucene will use the `SORTED_SET` type. Entries are kept in sorted order and duplicates are removed.
+** If the field is multi-valued, Lucene will use the `SORTED_SET` type.
+Entries are kept in sorted order and duplicates are removed.
 * Any `*PointField` Numeric or Date fields, `EnumFieldType`, and `CurrencyFieldType`:
 ** If the field is single-valued (i.e., multi-valued is false), Lucene will use the `NUMERIC` type.
-** If the field is multi-valued, Lucene will use the `SORTED_NUMERIC` type. Entries are kept in sorted order and duplicates are kept.
+** If the field is multi-valued, Lucene will use the `SORTED_NUMERIC` type.
+Entries are kept in sorted order and duplicates are kept.
 * Any of the deprecated `Trie*` Numeric or Date fields, `EnumField` and `CurrencyField`:
 ** If the field is single-valued (i.e., multi-valued is false), Lucene will use the `NUMERIC` type.
-** If the field is multi-valued, Lucene will use the `SORTED_SET` type. Entries are kept in sorted order and duplicates are removed.
+** If the field is multi-valued, Lucene will use the `SORTED_SET` type.
+Entries are kept in sorted order and duplicates are removed.
 
 These Lucene types are related to how the {lucene-javadocs}/core/org/apache/lucene/index/DocValuesType.html[values are sorted and stored].
 
-There is an additional configuration option available, which is to modify the `docValuesFormat` <<field-type-definitions-and-properties.adoc#docvaluesformat,used by the field type>>. The default implementation employs a mixture of loading some things into memory and keeping some on disk. In some cases, however, you may choose to specify an alternative {lucene-javadocs}/core/org/apache/lucene/codecs/DocValuesFormat.html[DocValuesFormat implementation]. For example, you could choose to kee [...]
+There is an additional configuration option available, which is to modify the `docValuesFormat` <<field-type-definitions-and-properties.adoc#docvaluesformat,used by the field type>>.
+The default implementation employs a mixture of loading some things into memory and keeping some on disk.
+In some cases, however, you may choose to specify an alternative {lucene-javadocs}/core/org/apache/lucene/codecs/DocValuesFormat.html[DocValuesFormat implementation].
+For example, you could choose to keep everything in memory by specifying `docValuesFormat="Direct"` on a field type:
 
 [source,xml]
 ----
@@ -67,7 +81,8 @@ There is an additional configuration option available, which is to modify the `d
 Please note that the `docValuesFormat` option may change in future releases.
 
 [NOTE]
-Lucene index back-compatibility is only supported for the default codec. If you choose to customize the `docValuesFormat` in your `schema.xml`, upgrading to a future version of Solr may require you to either switch back to the default codec and optimize your index to rewrite it into the default codec before upgrading, or re-build your entire index from scratch after upgrading.
+Lucene index back-compatibility is only supported for the default codec.
+If you choose to customize the `docValuesFormat` in your `schema.xml`, upgrading to a future version of Solr may require you to either switch back to the default codec and optimize your index to rewrite it into the default codec before upgrading, or re-build your entire index from scratch after upgrading.
 
 == Using DocValues
 
@@ -78,7 +93,8 @@ If `docValues="true"` for a field, then DocValues will automatically be used any
 === Retrieving DocValues During Search
 
 Field values retrieved during search queries are typically returned from stored values.
-However, non-stored docValues fields will be also returned along with other stored fields when all fields (or pattern matching globs) are specified to be returned (e.g., "`fl=*`") for search queries depending on the effective value of the `useDocValuesAsStored` parameter for each field. For schema versions >= 1.6, the implicit default is `useDocValuesAsStored="true"`.
+However, non-stored docValues fields will also be returned along with other stored fields when all fields (or pattern matching globs) are specified to be returned (e.g., "`fl=*`") for search queries, depending on the effective value of the `useDocValuesAsStored` parameter for each field.
+For schema versions >= 1.6, the implicit default is `useDocValuesAsStored="true"`.
 See <<field-type-definitions-and-properties.adoc#,Field Type Definitions and Properties>> & <<fields.adoc#,Fields>> for more details.
 
 When `useDocValuesAsStored="false"`, non-stored DocValues fields can still be explicitly requested by name in the <<common-query-parameters.adoc#fl-field-list-parameter,`fl` parameter>>, but will not match glob patterns (`"*"`).
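+
+For example, assuming a hypothetical docValues-only field named `popularity` defined with `useDocValuesAsStored="false"`, a request along these lines would still return it because the field is named explicitly, while `fl=*` would not:
+
+[source,text]
+----
+q=*:*&fl=id,popularity
+----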
@@ -92,5 +108,8 @@ In cases where the query is returning _only_ docValues fields performance may im
 
 When retrieving fields from their docValues form (such as when using the <<exporting-result-sets.adoc#,/export handler>>, <<streaming-expressions.adoc#,streaming expressions>> or if the field is requested in the `fl` parameter), two important differences between regular stored fields and docValues fields must be understood:
 
-1.  Order is _not_ preserved. When retrieving stored fields, the insertion order is the return order. For docValues, it is the _sorted_ order.
-2.  For field types using `SORTED_SET` (see above), multiple identical entries are collapsed into a single value. Thus if values 4, 5, 2, 4, 1 are inserted, the values returned will be 1, 2, 4, 5.
+. Order is _not_ preserved.
+When retrieving stored fields, the insertion order is the return order.
+For docValues, it is the _sorted_ order.
+. For field types using `SORTED_SET` (see above), multiple identical entries are collapsed into a single value.
+Thus if values 4, 5, 2, 4, 1 are inserted, the values returned will be 1, 2, 4, 5.
diff --git a/solr/solr-ref-guide/src/dsp.adoc b/solr/solr-ref-guide/src/dsp.adoc
index 50923e8..54775c0 100644
--- a/solr/solr-ref-guide/src/dsp.adoc
+++ b/solr/solr-ref-guide/src/dsp.adoc
@@ -16,107 +16,101 @@
 // specific language governing permissions and limitations
 // under the License.
 
-This section of the user guide explores functions that are commonly used in the field of
-Digital Signal Processing (DSP).
+This section of the user guide explores functions that are commonly used in the field of Digital Signal Processing (DSP).
 
 == Convolution
 
-The `conv` function calculates the convolution of two vectors. The convolution is calculated by *reversing*
-the second vector and sliding it across the first vector. The dot product of the two vectors
-is calculated at each point as the second vector is slid across the first vector.
+The `conv` function calculates the convolution of two vectors.
+The convolution is calculated by *reversing* the second vector and sliding it across the first vector.
+The dot product of the two vectors is calculated at each point as the second vector is slid across the first vector.
 The dot products are collected in a third vector which is the convolution of the two vectors.
 
 === Moving Average Function
 
-Before looking at an example of convolution it's useful to review the `movingAvg` function. The moving average
-function computes a moving average by sliding a window across a vector and computing
-the average of the window at each shift. If that sounds similar to convolution, that's because the `movingAvg`
-function involves a sliding window approach similar to convolution.
+Before looking at an example of convolution it's useful to review the `movingAvg` function.
+The moving average function computes a moving average by sliding a window across a vector and computing the average of the window at each shift.
+If that sounds similar to convolution, that's because the `movingAvg` function involves a sliding window approach similar to convolution.
 
-Below is an example of a moving average with a window size of 5. Notice that the original vector has 13 elements
-but the result of the moving average has only 9 elements. This is because the `movingAvg` function
-only begins generating results when it has a full window. The `ltrim` function is used to trim the
-first four elements from the original `y` array to line up with the moving average.
+Below is an example of a moving average with a window size of 5.
+Notice that the original vector has 13 elements but the result of the moving average has only 9 elements.
+This is because the `movingAvg` function only begins generating results when it has a full window.
+The `ltrim` function is used to trim the first four elements from the original `y` array to line up with the moving average.
 
 image::images/math-expressions/conv1.png[]
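+
+As an illustrative sketch of the kind of expression behind the screenshot above (the 13-element array is made up, and `ltrim(vector, n)` is assumed to drop the first `n` values):
+
+[source,text]
+----
+let(y=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
+    m=movingAvg(y, 5),
+    t=ltrim(y, 4))
+----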
 
 
 === Convolutional Smoothing
 
-The moving average can also be computed using convolution. In the example
-below the `conv` function is used to compute the moving average of the first array
-by applying the second array as a filter.
+The moving average can also be computed using convolution.
+In the example below the `conv` function is used to compute the moving average of the first array by applying the second array as a filter.
 
-Looking at the result, we see that the convolution produced an array with 17 values instead of the 9 values created by the
-moving average. That is because the `conv` function pads zeros
-to the front and back of the first vector so that the window size is always full.
+Looking at the result, we see that the convolution produced an array with 17 values instead of the 9 values created by the moving average.
+That is because the `conv` function pads zeros to the front and back of the first vector so that the window size is always full.
 
 image::images/math-expressions/conv2.png[]
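+
+As a rough sketch, the smoothing above could be expressed along these lines, with the second array acting as the filter (values are illustrative):
+
+[source,text]
+----
+let(y=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
+    smoothed=conv(y, array(0.2, 0.2, 0.2, 0.2, 0.2)))
+----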
 
-We achieve the same result as the `movingAvg` function by trimming the first and last 4 values of
-the convolution result using the `ltrim` and `rtrim` functions.
+We achieve the same result as the `movingAvg` function by trimming the first and last 4 values of the convolution result using the `ltrim` and `rtrim` functions.
 
-The example below plots both the trimmed convolution and the moving average on the same plot. Notice that
-they perfectly overlap.
+The example below plots both the trimmed convolution and the moving average on the same plot.
+Notice that they perfectly overlap.
 
 image::images/math-expressions/conv3.png[]
 
-This demonstrates how convolution can be used to smooth a signal by sliding a filter across the signal and
-computing the dot product at each point. The smoothing effect is caused by the design of the filter.
-In the example, the filter length is 5 and each value in the filter is .2. This filter calculates a
-simple moving average with a window size of 5.
+This demonstrates how convolution can be used to smooth a signal by sliding a filter across the signal and computing the dot product at each point.
+The smoothing effect is caused by the design of the filter.
+In the example, the filter length is 5 and each value in the filter is .2.
+This filter calculates a simple moving average with a window size of 5.
 
-The formula for computing a simple moving average using convolution is to make the filter length the window
-size and make the values of the filter all the same and sum to 1. A moving average with a window size of 4
-can be computed by changing the filter to a length of 4 with each value being .25.
+The formula for computing a simple moving average using convolution is to make the filter length the window size and make the values of the filter all the same and sum to 1.
+A moving average with a window size of 4 can be computed by changing the filter to a length of 4 with each value being .25.
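+
+For example, a window-size-4 moving average could be sketched as follows (illustrative values):
+
+[source,text]
+----
+let(y=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
+    smoothed=conv(y, array(0.25, 0.25, 0.25, 0.25)))
+----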
 
 ==== Changing the Weights
 
-The filter, which is sometimes called the *kernel*, can be viewed as a vector of weights. In the initial
-example all values in the filter have the same weight (.2). The weights in the filter can be changed to
-produce different smoothing effects. This is demonstrated in the example below.
+The filter, which is sometimes called the *kernel*, can be viewed as a vector of weights.
+In the initial example all values in the filter have the same weight (.2).
+The weights in the filter can be changed to produce different smoothing effects.
+This is demonstrated in the example below.
 
-In this example the filter increases in weight from .1 to .3. This places more weight towards the front
-of the filter. Notice that the filter is reversed with the `rev` function before the `conv` function applies it.
-This is done because convolution will reverse
-the filter. In this case we reverse it ahead of time and when convolution reverses it back, it is the same
-as the original filter.
+In this example the filter increases in weight from .1 to .3.
+This places more weight towards the front of the filter.
+Notice that the filter is reversed with the `rev` function before the `conv` function applies it.
+This is done because convolution will reverse the filter.
+In this case we reverse it ahead of time and when convolution reverses it back, it is the same as the original filter.
 
-The plot shows the effect of the different weights in the filter. The dark blue line is the initial array.
-The light blue line is the convolution and the orange line is the moving average. Notice that the convolution
-responds quicker to the movements in the underlying array. This is because more weight has been placed
-at the front of the filter.
+The plot shows the effect of the different weights in the filter.
+The dark blue line is the initial array.
+The light blue line is the convolution and the orange line is the moving average.
+Notice that the convolution responds quicker to the movements in the underlying array.
+This is because more weight has been placed at the front of the filter.
 
 image::images/math-expressions/conv4.png[]
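+
+A rough sketch of the weighted filter described above, pre-reversed with `rev` so that convolution applies it in its original order (the weights are illustrative and sum to 1):
+
+[source,text]
+----
+let(y=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
+    smoothed=conv(y, rev(array(0.1, 0.15, 0.2, 0.25, 0.3))))
+----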
 
-
-
 == Cross-Correlation
 
-Cross-correlation is used to determine the delay between two signals. This is accomplished by sliding one signal across another
-and calculating the dot product at each shift. The dot products are collected into a vector which represents the correlation
-at each shift. The highest dot product in the cross-correlation vector is the point where the two signals are most closely correlated.
+Cross-correlation is used to determine the delay between two signals.
+This is accomplished by sliding one signal across another and calculating the dot product at each shift.
+The dot products are collected into a vector which represents the correlation at each shift.
+The highest dot product in the cross-correlation vector is the point where the two signals are most closely correlated.
 
-The sliding dot product used in convolution can also be used to represent cross-correlation between two vectors. The only
-difference in the formula when representing correlation is that the second vector is *not reversed*.
+The sliding dot product used in convolution can also be used to represent cross-correlation between two vectors.
+The only difference in the formula when representing correlation is that the second vector is *not reversed*.
 
 Notice in the example below that the second vector is reversed by the `rev` function before it is operated on by the `conv` function.
-The `conv` function reverses the second vector so it will be flipped back to its original order to perform the correlation calculation
-rather than the convolution calculation.
+The `conv` function reverses the second vector so it will be flipped back to its original order to perform the correlation calculation rather than the convolution calculation.
 
-Notice in the result the highest value is 217. This is the point where the two vectors have the highest correlation.
+Notice in the result the highest value is 217.
+This is the point where the two vectors have the highest correlation.
 
 image::images/math-expressions/crosscorr.png[]
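+
+As an illustrative sketch, cross-correlating two made-up vectors follows the same pattern, with the second vector reversed ahead of time:
+
+[source,text]
+----
+let(a=array(1, 2, 3, 4, 5, 4, 3, 2, 1),
+    b=array(0, 0, 0, 1, 2, 3, 4, 5, 4),
+    c=conv(a, rev(b)))
+----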
 
 
 == Find Delay
 
-It is fairly simple to compute the delay from the cross-correlation result, but a convenience function called `finddelay` can
-be used to find the delay directly. Under the covers `finddelay` uses convolutional math to compute the cross-correlation vector
-and then computes the delay between the two signals.
+It is fairly simple to compute the delay from the cross-correlation result, but a convenience function called `finddelay` can be used to find the delay directly.
+Under the covers `finddelay` uses convolutional math to compute the cross-correlation vector and then computes the delay between the two signals.
 
-Below is an example of the `finddelay` function. Notice that the `finddelay` function reports a 3 period delay between the first
-and second signal.
+Below is an example of the `finddelay` function.
+Notice that the `finddelay` function reports a 3 period delay between the first and second signal.
 
 image::images/math-expressions/delay.png[]
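+
+A minimal sketch of the `finddelay` call, using a made-up second vector that is a shifted copy of the first:
+
+[source,text]
+----
+let(a=array(1, 2, 3, 4, 5, 4, 3, 2, 1),
+    b=array(0, 0, 0, 1, 2, 3, 4, 5, 4),
+    d=finddelay(a, b))
+----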
 
@@ -146,132 +140,125 @@ image::images/math-expressions/sinewave256.png[]
 
 == Autocorrelation
 
-Autocorrelation measures the degree to which a signal is correlated with itself. Autocorrelation is used to determine
-if a vector contains a signal or is purely random.
+Autocorrelation measures the degree to which a signal is correlated with itself.
+Autocorrelation is used to determine if a vector contains a signal or is purely random.
 
 A few examples, with plots, will help to understand the concepts.
 
-The first example simply revisits the example above of an extrapolated sine wave. The result of this
-is plotted in the image below. Notice that there is a structure to the plot that is clearly not random.
+The first example simply revisits the example above of an extrapolated sine wave.
+The result of this is plotted in the image below.
+Notice that there is a structure to the plot that is clearly not random.
 
 
 image::images/math-expressions/sinewave256.png[]
 
 
-In the next example the `sample` function is used to draw 256 samples from a `uniformDistribution` to create a
-vector of random data. The result of this is plotted in the image below. Notice that there is no clear structure to the
-data and the data appears to be random.
+In the next example the `sample` function is used to draw 256 samples from a `uniformDistribution` to create a vector of random data.
+The result of this is plotted in the image below.
+Notice that there is no clear structure to the data and the data appears to be random.
 
 image::images/math-expressions/noise.png[]
 
-
 In the next example the random noise is added to the sine wave using the `ebeAdd` function.
-The result of this is plotted in the image below. Notice that the sine wave has been hidden
-somewhat within the noise. Its difficult to say for sure if there is structure. As plots
-becomes more dense it can become harder to see a pattern hidden within noise.
-
+The result of this is plotted in the image below.
+Notice that the sine wave has been hidden somewhat within the noise.
+It's difficult to say for sure if there is structure.
+As plots become more dense it can become harder to see a pattern hidden within noise.
 
 image::images/math-expressions/hidden-signal.png[]
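+
+A rough sketch of how such a noisy vector might be built, assuming `sample(uniformDistribution(start, end), n)` draws `n` random values (the signal vector here is a small made-up stand-in for the sine wave):
+
+[source,text]
+----
+let(sig=array(0, 1, 0, -1, 0, 1, 0, -1),
+    noise=sample(uniformDistribution(-0.5, 0.5), 8),
+    hidden=ebeAdd(sig, noise))
+----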
 
-
-In the next examples autocorrelation is performed with each of the vectors shown above to see what the
-autocorrelation plots look like.
+In the next examples autocorrelation is performed with each of the vectors shown above to see what the autocorrelation plots look like.
 
 In the example below the `conv` function is used to autocorrelate the first vector which is the sine wave.
 Notice that the `conv` function is simply correlating the sine wave with itself.
 
-The plot has a very distinct structure to it. As the sine wave is slid across a copy of itself the correlation
-moves up and down in increasing intensity until it reaches a peak. This peak is directly in the center and is the
-the point where the sine waves are directly lined up. Following the peak the correlation moves up and down in decreasing
-intensity as the sine wave slides farther away from being directly lined up.
+The plot has a very distinct structure to it.
+As the sine wave is slid across a copy of itself the correlation moves up and down in increasing intensity until it reaches a peak.
+This peak is directly in the center and is the point where the sine waves are directly lined up.
+Following the peak the correlation moves up and down in decreasing intensity as the sine wave slides farther away from being directly lined up.
 
 This is the autocorrelation plot of a pure signal.
 
-
 image::images/math-expressions/signal-autocorrelation.png[]
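+
+As a sketch, autocorrelation follows the same correlation pattern, with a made-up vector correlated against itself:
+
+[source,text]
+----
+let(a=array(1, 2, 3, 4, 5, 4, 3, 2, 1),
+    c=conv(a, rev(a)))
+----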
 
-
-In the example below autocorrelation is performed with the vector of pure noise. Notice that the autocorrelation
-plot has a very different plot then the sine wave. In this plot there is long period of low intensity correlation that appears
-to be random. Then in the center a peak of high intensity correlation where the vectors are directly lined up.
+In the example below autocorrelation is performed with the vector of pure noise.
+Notice that the autocorrelation plot looks very different from the plot for the sine wave.
+In this plot there is a long period of low intensity correlation that appears to be random.
+Then in the center there is a peak of high intensity correlation where the vectors are directly lined up.
 This is followed by another long period of low intensity correlation.
 
 This is the autocorrelation plot of pure noise.
 
-
 image::images/math-expressions/noise-autocorrelation.png[]
 
-
 In the example below autocorrelation is performed on the vector with the sine wave hidden within the noise.
-Notice that this plot shows very clear signs of structure which is similar to autocorrelation plot of the
-pure signal. The correlation is less intense due to noise but the shape of the correlation plot suggests
-strongly that there is an underlying signal hidden within the noise.
-
+Notice that this plot shows very clear signs of structure which is similar to the autocorrelation plot of the pure signal.
+The correlation is less intense due to noise, but the shape of the correlation plot strongly suggests that there is an underlying signal hidden within the noise.
 
 image::images/math-expressions/hidden-signal-autocorrelation.png[]
 
-
 == Discrete Fourier Transform
 
-The convolution-based functions described above are operating on signals in the time domain. In the time
-domain the x-axis is time and the y-axis is the quantity of some value at a specific point in time.
+The convolution-based functions described above are operating on signals in the time domain.
+In the time domain the x-axis is time and the y-axis is the quantity of some value at a specific point in time.
 
 The discrete Fourier Transform translates a time domain signal into the frequency domain.
 In the frequency domain the x-axis is frequency, and y-axis is the accumulated power at a specific frequency.
 
-The basic principle is that every time domain signal is composed of one or more signals (sine waves)
-at different frequencies. The discrete Fourier transform decomposes a time domain signal into its component
-frequencies and measures the power at each frequency.
+The basic principle is that every time domain signal is composed of one or more signals (sine waves) at different frequencies.
+The discrete Fourier transform decomposes a time domain signal into its component frequencies and measures the power at each frequency.
 
-The discrete Fourier transform has many important uses. In the example below, the discrete Fourier transform is used
-to determine if a signal has structure or if it is purely random.
+The discrete Fourier transform has many important uses.
+In the example below, the discrete Fourier transform is used to determine if a signal has structure or if it is purely random.
 
 === Complex Result
 
-The `fft` function performs the discrete Fourier Transform on a vector of *real* data. The result
-of the `fft` function is returned as *complex* numbers. A complex number has two parts, *real* and *imaginary*.
+The `fft` function performs the discrete Fourier Transform on a vector of *real* data.
+The result of the `fft` function is returned as *complex* numbers.
+A complex number has two parts, *real* and *imaginary*.
 The *real* part of the result describes the magnitude of the signal at different frequencies.
-The *imaginary* part of the result describes the *phase*. The examples below deal only with the *real*
-part of the result.
+The *imaginary* part of the result describes the *phase*.
+The examples below deal only with the *real* part of the result.
 
-The `fft` function returns a `matrix` with two rows. The first row in the matrix is the *real*
-part of the complex result. The second row in the matrix is the *imaginary* part of the complex result.
+The `fft` function returns a `matrix` with two rows.
+The first row in the matrix is the *real* part of the complex result.
+The second row in the matrix is the *imaginary* part of the complex result.
 The `rowAt` function can be used to access the rows so they can be processed as vectors.
 
-
 === Fast Fourier Transform Examples
 
 In the first example the `fft` function is called on the sine wave used in the autocorrelation example.
 
-The results of the `fft` function is a matrix. The `rowAt` function is used to return the first row of
-the matrix which is a vector containing the real values of the `fft` response.
-
-The plot of the real values of the `fft` response is shown below. Notice there are two
-peaks on opposite sides of the plot. The plot is actually showing a mirrored response. The right side
-of the plot is an exact mirror of the left side. This is expected when the `fft` is run on real rather than
-complex data.
+The result of the `fft` function is a matrix.
+The `rowAt` function is used to return the first row of the matrix, which is a vector containing the real values of the `fft` response.
 
-Also notice that the `fft` has accumulated significant power in a single peak. This is the power associated with
-the specific frequency of the sine wave. The vast majority of frequencies in the plot have close to 0 power
-associated with them. This `fft` shows a clear signal with very low levels of noise.
+The plot of the real values of the `fft` response is shown below.
+Notice there are two peaks on opposite sides of the plot.
+The plot is actually showing a mirrored response.
+The right side of the plot is an exact mirror of the left side.
+This is expected when the `fft` is run on real rather than complex data.
 
+Also notice that the `fft` has accumulated significant power in a single peak.
+This is the power associated with the specific frequency of the sine wave.
+The vast majority of frequencies in the plot have close to 0 power associated with them.
+This `fft` shows a clear signal with very low levels of noise.
 
 image::images/math-expressions/signal-fft.png[]
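+
+A minimal sketch of the `fft` call described above, using a made-up 16-value oscillating vector (a power-of-two length, which FFT implementations typically expect) and assuming `rowAt(matrix, 0)` returns the first (real) row:
+
+[source,text]
+----
+let(a=array(0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1),
+    f=fft(a),
+    r=rowAt(f, 0))
+----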
 
-In the second example the `fft` function is called on a vector of random data similar to one used in the
-autocorrelation example. The plot of the real values of the `fft` response is shown below.
+In the second example the `fft` function is called on a vector of random data similar to one used in the autocorrelation example.
+The plot of the real values of the `fft` response is shown below.
 
-Notice that in is this response there is no clear peak. Instead all frequencies have accumulated a random level of
-power. This `fft` shows no clear sign of signal and appears to be noise.
+Notice that in this response there is no clear peak.
+Instead all frequencies have accumulated a random level of power.
+This `fft` shows no clear sign of signal and appears to be noise.
 
 image::images/math-expressions/noise-fft.png[]
 
-In the third example the `fft` function is called on the same signal hidden within noise that was used for
-the autocorrelation example. The plot of the real values of the `fft` response is shown below.
-
-Notice that there are two clear mirrored peaks, at the same locations as the `fft` of the pure signal. But
-there is also now considerable noise on the frequencies. The `fft` has found the signal and but also
-shows that there is considerable noise along with the signal.
+In the third example the `fft` function is called on the same signal hidden within noise that was used for the autocorrelation example.
+The plot of the real values of the `fft` response is shown below.
 
+Notice that there are two clear mirrored peaks, at the same locations as the `fft` of the pure signal.
+But there is also now considerable noise on the frequencies.
+The `fft` has found the signal, but it also shows that there is considerable noise along with the signal.
 
 image::images/math-expressions/hidden-signal-fft.png[]
diff --git a/solr/solr-ref-guide/src/edismax-query-parser.adoc b/solr/solr-ref-guide/src/edismax-query-parser.adoc
index c015ac7..9aeff4a 100644
--- a/solr/solr-ref-guide/src/edismax-query-parser.adoc
+++ b/solr/solr-ref-guide/src/edismax-query-parser.adoc
@@ -79,7 +79,8 @@ Defaults to `false`.
 
 `ps`::
 Phrase Slop.
-The default amount of slop - distance between terms - on phrase queries built with `pf`, `pf2` and/or `pf3` fields (affects boosting). See also the section <<Using 'Slop'>> below.
+The default amount of slop - distance between terms - on phrase queries built with `pf`, `pf2` and/or `pf3` fields (affects boosting).
+See also the section <<Using 'Slop'>> below.
 
 `pf2`::
 
@@ -87,14 +88,16 @@ A multivalued list of fields with optional weights.
 Similar to `pf`, but based on word _pair_ shingles.
 
 `ps2`::
-This is similar to `ps` but overrides the slop factor used for `pf2`. If not specified, `ps` is used.
+This is similar to `ps` but overrides the slop factor used for `pf2`.
+If not specified, `ps` is used.
 
 `pf3`::
 A multivalued list of fields with optional weights, based on triplets of word shingles.
 Similar to `pf`, except that instead of building a phrase per field out of all the words in the input, it builds a set of phrases for each field out of word _triplet_ shingles.
 
 `ps3`::
-This is similar to `ps` but overrides the slop factor used for `pf3`. If not specified, `ps` is used.
+This is similar to `ps` but overrides the slop factor used for `pf3`.
+If not specified, `ps` is used.
 
 `stopwords`::
 A Boolean parameter indicating if the `StopFilterFactory` configured in the query analyzer should be respected when parsing the query.
diff --git a/solr/solr-ref-guide/src/external-files-processes.adoc b/solr/solr-ref-guide/src/external-files-processes.adoc
index 7f44ee4..2925180 100644
--- a/solr/solr-ref-guide/src/external-files-processes.adoc
+++ b/solr/solr-ref-guide/src/external-files-processes.adoc
@@ -52,7 +52,8 @@ A `defVal` defines a default value that will be used if there is no entry in the
 === Format of the External File
 
 The file itself is located in Solr's index directory, which by default is `$SOLR_HOME/data`.
-The name of the file should be `external_fieldname_` or `external_fieldname_.*`. For the example above, then, the file could be named `external_entryRankFile` or `external_entryRankFile.txt`.
+The name of the file should be `external_fieldname_` or `external_fieldname_.*`.
+For the example above, then, the file could be named `external_entryRankFile` or `external_entryRankFile.txt`.
 
 [TIP]
 ====
@@ -186,7 +187,8 @@ name ::= text
 value ::= text
 ----
 
-Special characters in "text" values can be escaped using the escape character `\`. The following escape sequences are recognized:
+Special characters in "text" values can be escaped using the escape character `\`.
+The following escape sequences are recognized:
 
 [width="60%",options="header",]
 |===
diff --git a/solr/solr-ref-guide/src/faceting.adoc b/solr/solr-ref-guide/src/faceting.adoc
index 81c7894..c75db25 100644
--- a/solr/solr-ref-guide/src/faceting.adoc
+++ b/solr/solr-ref-guide/src/faceting.adoc
@@ -18,7 +18,8 @@
 
 Faceting is the arrangement of search results into categories based on indexed terms.
 
-Searchers are presented with the indexed terms, along with numerical counts of how many matching documents were found for each term. Faceting makes it easy for users to explore search results, narrowing in on exactly the results they are looking for.
+Searchers are presented with the indexed terms, along with numerical counts of how many matching documents were found for each term.
+Faceting makes it easy for users to explore search results, narrowing in on exactly the results they are looking for.
 
 See also <<json-facet-api.adoc#, JSON Facet API>> for an alternative approach to this.
 
@@ -27,16 +28,23 @@ See also <<json-facet-api.adoc#, JSON Facet API>> for an alternative approach to
 There are two general parameters for controlling faceting.
 
 `facet`::
-If set to `true`, this parameter enables facet counts in the query response. If set to `false`, a blank or missing value, this parameter disables faceting. None of the other parameters listed below will have any effect unless this parameter is set to `true`. The default value is blank (false).
+If set to `true`, this parameter enables facet counts in the query response.
+If set to `false`, a blank or missing value, this parameter disables faceting.
+None of the other parameters listed below will have any effect unless this parameter is set to `true`.
+The default value is blank (false).
 
 `facet.query`::
 This parameter allows you to specify an arbitrary query in the Lucene default syntax to generate a facet count.
 +
-By default, Solr's faceting feature automatically determines the unique terms for a field and returns a count for each of those terms. Using `facet.query`, you can override this default behavior and select exactly which terms or expressions you would like to see counted. In a typical implementation of faceting, you will specify a number of `facet.query` parameters. This parameter can be particularly useful for numeric-range-based facets or prefix-based facets.
+By default, Solr's faceting feature automatically determines the unique terms for a field and returns a count for each of those terms.
+Using `facet.query`, you can override this default behavior and select exactly which terms or expressions you would like to see counted.
+In a typical implementation of faceting, you will specify a number of `facet.query` parameters.
+This parameter can be particularly useful for numeric-range-based facets or prefix-based facets.
 +
 You can set the `facet.query` parameter multiple times to indicate that multiple queries should be used as separate facet constraints.
 +
-To use facet queries in a syntax other than the default syntax, prefix the facet query with the name of the query notation. For example, to use the hypothetical `myfunc` query parser, you could set the `facet.query` parameter like so:
+To use facet queries in a syntax other than the default syntax, prefix the facet query with the name of the query notation.
+For example, to use the hypothetical `myfunc` query parser, you could set the `facet.query` parameter like so:
 +
 `facet.query={!myfunc}name~fred`
 
@@ -44,24 +52,30 @@ To use facet queries in a syntax other than the default syntax, prefix the facet
 
 Several parameters can be used to trigger faceting based on the indexed terms in a field.
 
-When using these parameters, it is important to remember that "term" is a very specific concept in Lucene: it relates to the literal field/value pairs that are indexed after any analysis occurs. For text fields that include stemming, lowercasing, or word splitting, the resulting terms may not be what you expect.
+When using these parameters, it is important to remember that "term" is a very specific concept in Lucene: it relates to the literal field/value pairs that are indexed after any analysis occurs.
+For text fields that include stemming, lowercasing, or word splitting, the resulting terms may not be what you expect.
 
-If you want Solr to perform both analysis (for searching) and faceting on the full literal strings, use the `copyField` directive in your Schema to create two versions of the field: one Text and one String. The Text field should have `indexed="true" docValues=“false"` if used for searching but not faceting and the String field should have `indexed="false" docValues="true"` if used for faceting but not searching.
+If you want Solr to perform both analysis (for searching) and faceting on the full literal strings, use the `copyField` directive in your Schema to create two versions of the field: one Text and one String.
+The Text field should have `indexed="true" docValues="false"` if used for searching but not faceting and the String field should have `indexed="false" docValues="true"` if used for faceting but not searching.
 (For more information about the `copyField` directive, see <<fields-and-schema-design.adoc#,Fields and Schema Design>>.)
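+
+A minimal sketch of that pattern, using hypothetical field names:
+
+[source,xml]
+----
+<!-- analyzed text version, used for searching -->
+<field name="title" type="text_general" indexed="true" stored="true" docValues="false"/>
+<!-- string version, used for faceting on the full literal value -->
+<field name="title_str" type="string" indexed="false" stored="false" docValues="true"/>
+<copyField source="title" dest="title_str"/>
+----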
 
 Unless otherwise specified, all of the parameters below can be specified on a per-field basis with the syntax of `f.<fieldname>.facet.<parameter>`
 
 `facet.field`::
-The `facet.field` parameter identifies a field that should be treated as a facet. It iterates over each Term in the field and generate a facet count using that Term as the constraint. This parameter can be specified multiple times in a query to select multiple facet fields.
+The `facet.field` parameter identifies a field that should be treated as a facet.
+It iterates over each Term in the field and generates a facet count using that Term as the constraint.
+This parameter can be specified multiple times in a query to select multiple facet fields.
 +
 IMPORTANT: If you do not set this parameter to at least one field in the schema, none of the other parameters described in this section will have any effect.
 
 `facet.prefix`::
-The `facet.prefix` parameter limits the terms on which to facet to those starting with the given string prefix. This does not limit the query in any way, only the facets that would be returned in response to the query.
+The `facet.prefix` parameter limits the terms on which to facet to those starting with the given string prefix.
+This does not limit the query in any way, only the facets that would be returned in response to the query.
 +
 
 `facet.contains`::
-The `facet.contains` parameter limits the terms on which to facet to those containing the given substring. This does not limit the query in any way, only the facets that would be returned in response to the query.
+The `facet.contains` parameter limits the terms on which to facet to those containing the given substring.
+This does not limit the query in any way, only the facets that would be returned in response to the query.
 
 `facet.contains.ignoreCase`::
 
@@ -78,13 +92,16 @@ There are two options for this parameter.
 +
 --
 `count`::: Sort the constraints by count (highest count first).
-`index`::: Return the constraints sorted in their index order (lexicographic by indexed term). For terms in the ASCII range, this will be alphabetically sorted.
+`index`::: Return the constraints sorted in their index order (lexicographic by indexed term).
+For terms in the ASCII range, this will be alphabetically sorted.
 --
 +
-The default is `count` if `facet.limit` is greater than 0, otherwise, the default is `index`. Note that the default logic is changed when <<#limiting-facet-with-certain-terms>>
+The default is `count` if `facet.limit` is greater than 0, otherwise, the default is `index`.
+Note that the default logic is changed when <<#limiting-facet-with-certain-terms>> is used.
 
 `facet.limit`::
-This parameter specifies the maximum number of constraint counts (essentially, the number of facets for a field that are returned) that should be returned for the facet fields. A negative value means that Solr will return unlimited number of constraint counts.
+This parameter specifies the maximum number of constraint counts (essentially, the number of facets for a field that are returned) that should be returned for the facet fields.
+A negative value means that Solr will return an unlimited number of constraint counts.
 +
 The default value is `100`.
 
@@ -96,7 +113,8 @@ The default value is `0`.
 
 `facet.mincount`::
 
-The `facet.mincount` parameter specifies the minimum counts required for a facet field to be included in the response. If a field's counts are below the minimum, the field's facet is not returned.
+The `facet.mincount` parameter specifies the minimum counts required for a facet field to be included in the response.
+If a field's counts are below the minimum, the field's facet is not returned.
 +
 The default value is `0`.
 
@@ -113,72 +131,98 @@ The following methods are available.
 --
 `enum`::: Enumerates all terms in a field, calculating the set intersection of documents that match the term with documents that match the query.
 +
-This method is recommended for faceting multi-valued fields that have only a few distinct values. The average number of values per document does not matter.
+This method is recommended for faceting multi-valued fields that have only a few distinct values.
+The average number of values per document does not matter.
 +
-For example, faceting on a field with U.S. States such as `Alabama, Alaska, ... Wyoming` would lead to fifty cached filters which would be used over and over again. The `filterCache` should be large enough to hold all the cached filters.
+For example, faceting on a field with U.S. States such as `Alabama, Alaska, ... Wyoming` would lead to fifty cached filters which would be used over and over again.
+The `filterCache` should be large enough to hold all the cached filters.
 
 `fc`::: Calculates facet counts by iterating over documents that match the query and summing the terms that appear in each document.
 +
-This is currently implemented using an `UnInvertedField` cache if the field either is multi-valued or is tokenized (according to `FieldType.isTokened()`). Each document is looked up in the cache to see what terms/values it contains, and a tally is incremented for each value.
+This is currently implemented using an `UnInvertedField` cache if the field either is multi-valued or is tokenized (according to `FieldType.isTokened()`).
+Each document is looked up in the cache to see what terms/values it contains, and a tally is incremented for each value.
 +
-This method is excellent for situations where the number of indexed values for the field is high, but the number of values per document is low. For multi-valued fields, a hybrid approach is used that uses term filters from the `filterCache` for terms that match many documents. The letters `fc` stand for field cache.
+This method is excellent for situations where the number of indexed values for the field is high, but the number of values per document is low.
+For multi-valued fields, a hybrid approach is used that uses term filters from the `filterCache` for terms that match many documents.
+The letters `fc` stand for field cache.
 
-`fcs`::: Per-segment field faceting for single-valued string fields. Enable with `facet.method=fcs` and control the number of threads used with the `threads` local parameter. This parameter allows faceting to be faster in the presence of rapid index changes.
+`fcs`::: Per-segment field faceting for single-valued string fields.
+Enable with `facet.method=fcs` and control the number of threads used with the `threads` local parameter.
+This parameter allows faceting to be faster in the presence of rapid index changes.
 --
 +
 The default value is `fc` (except for fields using the `BoolField` field type and when `facet.exists=true` is requested) since it tends to use less memory and is faster when a field has many unique terms in the index.
 
 `facet.enum.cache.minDf`::
-This parameter indicates the minimum document frequency (the number of documents matching a term) for which the filterCache should be used when determining the constraint count for that term. This is only used with the `facet.method=enum` method of faceting.
+This parameter indicates the minimum document frequency (the number of documents matching a term) for which the filterCache should be used when determining the constraint count for that term.
+This is only used with the `facet.method=enum` method of faceting.
 +
-A value greater than zero decreases the filterCache's memory usage, but increases the time required for the query to be processed. If you are faceting on a field with a very large number of terms, and you wish to decrease memory usage, try setting this parameter to a value between `25` and `50`, and run a few tests. Then, optimize the parameter setting as necessary.
+A value greater than zero decreases the filterCache's memory usage, but increases the time required for the query to be processed.
+If you are faceting on a field with a very large number of terms, and you wish to decrease memory usage, try setting this parameter to a value between `25` and `50`, and run a few tests.
+Then, optimize the parameter setting as necessary.
 +
 The default value is `0`, causing the filterCache to be used for all terms in the field.
 
 `facet.exists`::
-To cap facet counts by 1, specify `facet.exists=true`. This parameter can be used with `facet.method=enum` or when it's omitted. It can be used only on non-trie fields (such as strings). It may speed up facet counting on large indices and/or high-cardinality facet values.
+To cap facet counts by 1, specify `facet.exists=true`.
+This parameter can be used with `facet.method=enum` or when it's omitted.
+It can be used only on non-trie fields (such as strings).
+It may speed up facet counting on large indices and/or high-cardinality facet values.
 
 `facet.excludeTerms`::
 
 If you want to remove terms from facet counts but keep them in the index, the `facet.excludeTerms` parameter allows you to do that.
 
 `facet.overrequest.count` and `facet.overrequest.ratio`::
-In some situations, the accuracy in selecting the "top" constraints returned for a facet in a distributed Solr query can be improved by "over requesting" the number of desired constraints (i.e., `facet.limit`) from each of the individual shards. In these situations, each shard is by default asked for the top `10 + (1.5 * facet.limit)` constraints.
+In some situations, the accuracy in selecting the "top" constraints returned for a facet in a distributed Solr query can be improved by "over requesting" the number of desired constraints (i.e., `facet.limit`) from each of the individual shards.
+In these situations, each shard is by default asked for the top `10 + (1.5 * facet.limit)` constraints.
 +
-In some situations, depending on how your docs are partitioned across your shards and what `facet.limit` value you used, you may find it advantageous to increase or decrease the amount of over-requesting Solr does. This can be achieved by setting the `facet.overrequest.count` (defaults to `10`) and `facet.overrequest.ratio` (defaults to `1.5`) parameters.
+In some situations, depending on how your docs are partitioned across your shards and what `facet.limit` value you used, you may find it advantageous to increase or decrease the amount of over-requesting Solr does.
+This can be achieved by setting the `facet.overrequest.count` (defaults to `10`) and `facet.overrequest.ratio` (defaults to `1.5`) parameters.
 
 `facet.threads`::
-This parameter will cause loading the underlying fields used in faceting to be executed in parallel with the number of threads specified. Specify as `facet.threads=N` where `N` is the maximum number of threads used.
+This parameter will cause the underlying fields used in faceting to be loaded in parallel, using the number of threads specified.
+Specify as `facet.threads=N` where `N` is the maximum number of threads used.
 +
-Omitting this parameter or specifying the thread count as `0` will not spawn any threads, and only the main request thread will be used. Specifying a negative number of threads will create up to `Integer.MAX_VALUE` threads.
+Omitting this parameter or specifying the thread count as `0` will not spawn any threads, and only the main request thread will be used.
+Specifying a negative number of threads will create up to `Integer.MAX_VALUE` threads.
 
 == Range Faceting
 
-You can use Range Faceting on any date field or any numeric field that supports range queries. This is particularly useful for stitching together a series of range queries (as facet by query) for things like prices.
+You can use Range Faceting on any date field or any numeric field that supports range queries.
+This is particularly useful for stitching together a series of range queries (as facet by query) for things like prices.
 
 `facet.range`::
-The `facet.range` parameter defines the field for which Solr should create range facets. For example:
+The `facet.range` parameter defines the field for which Solr should create range facets.
+For example:
 +
 `facet.range=price&facet.range=age`
 +
 `facet.range=lastModified_dt`
 
 `facet.range.start`::
-The `facet.range.start` parameter specifies the lower bound of the ranges. You can specify this parameter on a per field basis with the syntax of `f.<fieldname>.facet.range.start`. For example:
+The `facet.range.start` parameter specifies the lower bound of the ranges.
+You can specify this parameter on a per field basis with the syntax of `f.<fieldname>.facet.range.start`.
+For example:
 +
 `f.price.facet.range.start=0.0&f.age.facet.range.start=10`
 +
 `f.lastModified_dt.facet.range.start=NOW/DAY-30DAYS`
 
 `facet.range.end`::
-The `facet.range.end` specifies the upper bound of the ranges. You can specify this parameter on a per field basis with the syntax of `f.<fieldname>.facet.range.end`. For example:
+The `facet.range.end` specifies the upper bound of the ranges.
+You can specify this parameter on a per field basis with the syntax of `f.<fieldname>.facet.range.end`.
+For example:
 +
 `f.price.facet.range.end=1000.0&f.age.facet.range.end=99`
 +
 `f.lastModified_dt.facet.range.end=NOW/DAY+30DAYS`
 
 `facet.range.gap`::
-The span of each range expressed as a value to be added to the lower bound. For date fields, this should be expressed using the {solr-javadocs}/core/org/apache/solr/util/DateMathParser.html[`DateMathParser` syntax] (such as, `facet.range.gap=%2B1DAY ... '+1DAY'`). You can specify this parameter on a per-field basis with the syntax of `f.<fieldname>.facet.range.gap`. For example:
+The span of each range expressed as a value to be added to the lower bound.
+For date fields, this should be expressed using the {solr-javadocs}/core/org/apache/solr/util/DateMathParser.html[`DateMathParser` syntax] (such as, `facet.range.gap=%2B1DAY ... '+1DAY'`).
+You can specify this parameter on a per-field basis with the syntax of `f.<fieldname>.facet.range.gap`.
+For example:
 +
 `f.price.facet.range.gap=100&f.age.facet.range.gap=10`
 +
@@ -187,12 +231,17 @@ The span of each range expressed as a value to be added to the lower bound. For
 `facet.range.hardend`::
 The `facet.range.hardend` parameter is a Boolean parameter that specifies how Solr should handle cases where the `facet.range.gap` does not divide evenly between `facet.range.start` and `facet.range.end`.
 +
-If `true`, the last range constraint will have the `facet.range.end` value as an upper bound. If `false`, the last range will have the smallest possible upper bound greater then `facet.range.end` such that the range is the exact width of the specified range gap. The default value for this parameter is false.
+If `true`, the last range constraint will have the `facet.range.end` value as an upper bound.
+If `false`, the last range will have the smallest possible upper bound greater than `facet.range.end` such that the range is the exact width of the specified range gap.
+The default value for this parameter is false.
 +
 This parameter can be specified on a per field basis with the syntax `f.<fieldname>.facet.range.hardend`.
 
 `facet.range.include`::
-By default, the ranges used to compute range faceting between `facet.range.start` and `facet.range.end` are inclusive of their lower bounds and exclusive of the upper bounds. The "before" range defined with the `facet.range.other` parameter is exclusive and the "after" range is inclusive. This default, equivalent to "lower" below, will not result in double counting at the boundaries. You can use the `facet.range.include` parameter to modify this behavior using the following options:
+By default, the ranges used to compute range faceting between `facet.range.start` and `facet.range.end` are inclusive of their lower bounds and exclusive of the upper bounds.
+The "before" range defined with the `facet.range.other` parameter is exclusive and the "after" range is inclusive.
+This default, equivalent to "lower" below, will not result in double counting at the boundaries.
+You can use the `facet.range.include` parameter to modify this behavior using the following options:
 +
 --
 * `lower`: All gap-based ranges include their lower bound.
@@ -217,15 +266,20 @@ The `facet.range.other` parameter specifies that in addition to the counts for e
 * `all`: Compute counts for before, between, and after.
 --
 +
-This parameter can be specified on a per field basis with the syntax of `f.<fieldname>.facet.range.other`. In addition to the `all` option, this parameter can be specified multiple times to indicate multiple choices, but `none` will override all other options.
+This parameter can be specified on a per field basis with the syntax of `f.<fieldname>.facet.range.other`.
+In addition to the `all` option, this parameter can be specified multiple times to indicate multiple choices, but `none` will override all other options.
 
 `facet.range.method`::
-The `facet.range.method` parameter selects the type of algorithm or method Solr should use for range faceting. Both methods produce the same results, but performance may vary.
+The `facet.range.method` parameter selects the type of algorithm or method Solr should use for range faceting.
+Both methods produce the same results, but performance may vary.
 +
 --
-filter::: This method generates the ranges based on other facet.range parameters, and for each of them executes a filter that later intersects with the main query resultset to get the count. It will make use of the filterCache, so it will benefit of a cache large enough to contain all ranges.
+filter::: This method generates the ranges based on other facet.range parameters, and for each of them executes a filter that later intersects with the main query resultset to get the count.
+It will make use of the filterCache, so it will benefit from a cache large enough to contain all ranges.
 +
-dv::: This method iterates the documents that match the main query, and for each of them finds the correct range for the value. This method will make use of <<docvalues.adoc#,docValues>> (if enabled for the field) or fieldCache. The `dv` method is not supported for field type DateRangeField or when using <<result-grouping.adoc#,group.facets>>.
+dv::: This method iterates the documents that match the main query, and for each of them finds the correct range for the value.
+This method will make use of <<docvalues.adoc#,docValues>> (if enabled for the field) or fieldCache.
+The `dv` method is not supported for field type DateRangeField or when using <<result-grouping.adoc#,group.facets>>.
 --
 +
 The default value for this parameter is `filter`.
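+
+Putting the main range parameters together, a request of this general shape (using the hypothetical `price` field from the examples above) would bucket prices into ranges of 100 between 0.0 and 1000.0:
+
+[source,text]
+----
+q=*:*&facet=true&facet.range=price&f.price.facet.range.start=0.0&f.price.facet.range.end=1000.0&f.price.facet.range.gap=100
+----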
@@ -242,19 +296,26 @@ For more information, see the examples in the <<date-formatting-math.adoc#,Worki
 
 === facet.mincount in Range Faceting
 
-The `facet.mincount` parameter, the same one as used in field faceting is also applied to range faceting. When used, no ranges with a count below the minimum will be included in the response.
+The `facet.mincount` parameter, the same one as used in field faceting, is also applied to range faceting.
+When used, no ranges with a count below the minimum will be included in the response.
 
 == Pivot (Decision Tree) Faceting
 
-Pivoting is a summarization tool that lets you automatically sort, count, total or average data stored in a table. The results are typically displayed in a second table showing the summarized data. Pivot faceting lets you create a summary table of the results from a faceting documents by multiple fields.
+Pivoting is a summarization tool that lets you automatically sort, count, total or average data stored in a table.
+The results are typically displayed in a second table showing the summarized data.
+Pivot faceting lets you create a summary table of the results from faceting documents by multiple fields.
 
-Another way to look at it is that the query produces a Decision Tree, in that Solr tells you "for facet A, the constraints/counts are X/N, Y/M, etc. If you were to constrain A by X, then the constraint counts for B would be S/P, T/Q, etc.". In other words, it tells you in advance what the "next" set of facet results would be for a field if you apply a constraint from the current facet results.
+Another way to look at it is that the query produces a Decision Tree, in that Solr tells you "for facet A, the constraints/counts are X/N, Y/M, etc.
+If you were to constrain A by X, then the constraint counts for B would be S/P, T/Q, etc.".
+In other words, it tells you in advance what the "next" set of facet results would be for a field if you apply a constraint from the current facet results.
 
 `facet.pivot`::
-The `facet.pivot` parameter defines the fields to use for the pivot. Multiple `facet.pivot` values will create multiple "facet_pivot" sections in the response. Separate each list of fields with a comma.
+The `facet.pivot` parameter defines the fields to use for the pivot.
+Multiple `facet.pivot` values will create multiple "facet_pivot" sections in the response.
+Separate each list of fields with a comma.
 
 `facet.pivot.mincount`::
-The `facet.pivot.mincount` parameter defines the minimum number of documents that need to match in order for the facet to be included in results. The default is 1.
+The `facet.pivot.mincount` parameter defines the minimum number of documents that need to match in order for the facet to be included in results.
+The default is 1.
 +
 Using the "`bin/solr -e techproducts`" example, A query URL like this one will return the data below, with the pivot faceting results found in the section "facet_pivot":
 
@@ -384,7 +445,8 @@ Results:
 
 === Combining Facet Queries And Facet Ranges With Pivot Facets
 
-A `query` local parameter can be used with `facet.pivot` to refer to `facet.query` instances (by tag) that should be computed for each pivot constraint. Similarly, a `range` local parameter can be used with `facet.pivot` to refer to `facet.range` instances.
+A `query` local parameter can be used with `facet.pivot` to refer to `facet.query` instances (by tag) that should be computed for each pivot constraint.
+Similarly, a `range` local parameter can be used with `facet.pivot` to refer to `facet.range` instances.
 
 In the example below, two query facets are computed for each of the `facet.pivot` result hierarchies:
 
@@ -520,11 +582,14 @@ Although `facet.pivot.mincount` deviates in name from the `facet.mincount` param
 
 == Interval Faceting
 
-Another supported form of faceting is interval faceting. This sounds similar to range faceting, but the functionality is really closer to doing facet queries with range queries. Interval faceting allows you to set variable intervals and count the number of documents that have values within those intervals in the specified field.
+Another supported form of faceting is interval faceting.
+This sounds similar to range faceting, but the functionality is really closer to doing facet queries with range queries.
+Interval faceting allows you to set variable intervals and count the number of documents that have values within those intervals in the specified field.
 
 Even though the same functionality can be achieved by using a facet query with range queries, the implementation of these two methods is very different and will provide different performance depending on the context.
 
-If you are concerned about the performance of your searches you should test with both options. Interval faceting tends to be better with multiple intervals for the same fields, while facet query tend to be better in environments where filter cache is more effective (static indexes for example).
+If you are concerned about the performance of your searches you should test with both options.
+Interval faceting tends to be better with multiple intervals for the same fields, while facet queries tend to be better in environments where the filter cache is more effective (static indexes for example).
 
 This method will use <<docvalues.adoc#,docValues>> if they are enabled for the field, and will use fieldCache otherwise.
 
@@ -532,12 +597,15 @@ Use these parameters for interval faceting:
 
 `facet.interval`::
 
-This parameter Indicates the field where interval faceting must be applied. It can be used multiple times in the same request to indicate multiple fields.
+This parameter indicates the field where interval faceting must be applied.
+It can be used multiple times in the same request to indicate multiple fields.
 +
 `facet.interval=price&facet.interval=size`
 
 `facet.interval.set`::
-This parameter is used to set the intervals for the field, it can be specified multiple times to indicate multiple intervals. This parameter is global, which means that it will be used for all fields indicated with `facet.interval` unless there is an override for a specific field. To override this parameter on a specific field you can use: `f.<fieldname>.facet.interval.set`, for example:
+This parameter is used to set the intervals for the field; it can be specified multiple times to indicate multiple intervals.
+This parameter is global, which means that it will be used for all fields indicated with `facet.interval` unless there is an override for a specific field.
+To override this parameter on a specific field you can use: `f.<fieldname>.facet.interval.set`, for example:
 +
 [source,text]
 f.price.facet.interval.set=[0,10]&f.price.facet.interval.set=(10,100]
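+
+As an illustrative sketch (assuming the `techproducts` example collection and a numeric `price` field), a complete request that enables interval faceting might look like this:
+
+[source,text]
+----
+http://localhost:8983/solr/techproducts/select?q=*:*&facet=true&facet.interval=price&f.price.facet.interval.set=[0,10]&f.price.facet.interval.set=(10,100]
+----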
@@ -555,15 +623,25 @@ For example:
 
 The initial and end values cannot be empty.
 
-If the interval needs to be unbounded, the special character `\*` can be used for both, start and end, limits. When using this special character, the start syntax options (`(` and `[`), and end syntax options (`)` and `]`) will be treated the same. `[*,*]` will include all documents with a value in the field.
+If the interval needs to be unbounded, the special character `\*` can be used for both the start and end limits.
+When using this special character, the start syntax options (`(` and `[`), and end syntax options (`)` and `]`) will be treated the same.
+`[*,*]` will include all documents with a value in the field.
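+
+For example, two open-ended buckets over a hypothetical `price` field could be requested as follows (a sketch, not a required configuration):
+
+[source,text]
+f.price.facet.interval.set=[*,100)&f.price.facet.interval.set=[100,*]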
 
-The interval limits may be strings but there is no need to add quotes. All the text until the comma will be treated as the start limit, and the text after that will be the end limit. For example: `[Buenos Aires,New York]`. Keep in mind that a string-like comparison will be done to match documents in string intervals (case-sensitive). The comparator can't be changed.
+The interval limits may be strings but there is no need to add quotes.
+All the text until the comma will be treated as the start limit, and the text after that will be the end limit.
+For example: `[Buenos Aires,New York]`.
+Keep in mind that a string-like comparison will be done to match documents in string intervals (case-sensitive).
+The comparator can't be changed.
 
-Commas, brackets and square brackets can be escaped by using `\` in front of them. Whitespaces before and after the values will be omitted.
+Commas, brackets and square brackets can be escaped by using `\` in front of them.
+Whitespace before and after the values will be omitted.
 
-The start limit can't be grater than the end limit. Equal limits are allowed, this allows you to indicate the specific values that you want to count, like `[A,A]`, `[B,B]` and `[C,Z]`.
+The start limit can't be greater than the end limit.
+Equal limits are allowed; this lets you indicate the specific values that you want to count, like `[A,A]`, `[B,B]`, and `[C,Z]`.
 
-Interval faceting supports output key replacement described below. Output keys can be replaced in both the `facet.interval parameter` and in the `facet.interval.set parameter`. For example:
+Interval faceting supports output key replacement described below.
+Output keys can be replaced in both the `facet.interval` and `facet.interval.set` parameters.
+For example:
 
 [source,text]
 ----
@@ -575,11 +653,13 @@ Interval faceting supports output key replacement described below. Output keys c
 
 == Local Params for Faceting
 
-The <<local-params.adoc#,LocalParams syntax>> allows overriding global settings. It can also provide a method of adding metadata to other parameter values, much like XML attributes.
+The <<local-params.adoc#,LocalParams syntax>> allows overriding global settings.
+It can also provide a method of adding metadata to other parameter values, much like XML attributes.
 
 === Tagging and Excluding Filters
 
-You can tag specific filters and exclude those filters when faceting. This is useful when doing multi-select faceting.
+You can tag specific filters and exclude those filters when faceting.
+This is useful when doing multi-select faceting.
 
 Consider the following example query with faceting:
 
@@ -587,7 +667,8 @@ Consider the following example query with faceting:
 
 Because everything is already constrained by the filter `doctype:pdf`, the `facet.field=doctype` facet command is currently redundant and will return 0 counts for everything except `doctype:pdf`.
 
-To implement a multi-select facet for doctype, a GUI may want to still display the other doctype values and their associated counts, as if the `doctype:pdf` constraint had not yet been applied. For example:
+To implement a multi-select facet for doctype, a GUI may still want to display the other doctype values and their associated counts, as if the `doctype:pdf` constraint had not yet been applied.
+For example:
 
 [source,text]
 ----
@@ -602,23 +683,31 @@ To return counts for doctype values that are currently not selected, tag filters
 
 `q=mainquery&fq=status:public&fq={!tag=dt}doctype:pdf&facet=true&facet.field={!ex=dt}doctype`
 
-Filter exclusion is supported for all types of facets. Both the `tag` and `ex` local params may specify multiple values by separating them with commas.
+Filter exclusion is supported for all types of facets.
+Both the `tag` and `ex` local params may specify multiple values by separating them with commas.
 
 === Changing the Output Key
 
-To change the output key for a faceting command, specify a new name with the `key` local parameter. For example:
+To change the output key for a faceting command, specify a new name with the `key` local parameter.
+For example:
 
 `facet.field={!ex=dt key=mylabel}doctype`
 
-The parameter setting above causes the field facet results for the "doctype" field to be returned using the key "mylabel" rather than "doctype" in the response. This can be helpful when faceting on the same field multiple times with different exclusions.
+The parameter setting above causes the field facet results for the "doctype" field to be returned using the key "mylabel" rather than "doctype" in the response.
+This can be helpful when faceting on the same field multiple times with different exclusions.
 
 === Limiting Facet with Certain Terms
 
-To limit field facet with certain terms specify them comma separated with `terms` local parameter. Commas and quotes in terms can be escaped with backslash, as in `\,`. In this case facet is calculated on a way similar to `facet.method=enum`, but ignores `facet.enum.cache.minDf`. For example:
+To limit a field facet to certain terms, specify them comma-separated with the `terms` local parameter.
+Commas and quotes in terms can be escaped with backslash, as in `\,`.
+In this case the facet is calculated in a way similar to `facet.method=enum`, but it ignores `facet.enum.cache.minDf`.
+For example:
 
 `facet.field={!terms='alfa,betta,with\,with\',with space'}symbol`
 
-This local parameter overrides default logic for `facet.sort`. if `facet.sort` is omitted, facets are returned in the given terms order that might be changed with `index` and `count` values. Note: other parameters might not be fully supported when this parameter is supplied.
+This local parameter overrides the default logic for `facet.sort`.
+If `facet.sort` is omitted, facets are returned in the given terms order, which can be changed with the `index` and `count` values.
+Note: other parameters might not be fully supported when this parameter is supplied.
 
 == Related Topics
 
diff --git a/solr/solr-ref-guide/src/field-properties-by-use-case.adoc b/solr/solr-ref-guide/src/field-properties-by-use-case.adoc
index 8425b64..3b84c6b 100644
--- a/solr/solr-ref-guide/src/field-properties-by-use-case.adoc
+++ b/solr/solr-ref-guide/src/field-properties-by-use-case.adoc
@@ -16,7 +16,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Here is a summary of common use cases, and the attributes the fields or field types should have to support the case. An entry of true or false in the table indicates that the option must be set to the given value for the use case to function correctly. If no entry is provided, the setting of that attribute has no impact on the case.
+Here is a summary of common use cases, and the attributes the fields or field types should have to support the case.
+An entry of true or false in the table indicates that the option must be set to the given value for the use case to function correctly.
+If no entry is provided, the setting of that attribute has no impact on the case.
 
 // NOTE: not currently using footnoteref here because:
 //  - it has issues with tables in the PDF
@@ -43,7 +45,12 @@ Notes:
 3. [[fpbuc_3,3]] (if termVectors=true)
 4. [[fpbuc_4,4]] A tokenizer must be defined for the field, but it doesn't need to be indexed.
 5. [[fpbuc_5,5]] Described in <<document-analysis.adoc#,Document Analysis in Solr>>.
-6. [[fpbuc_6,6]] Term vectors are not mandatory here. If not true, then a stored field is analyzed. So term vectors are recommended, but only required if `stored=false`.
-7. [[fpbuc_7,7]] For most field types, either `indexed` or `docValues` must be true, but both are not required. <<docvalues.adoc#,DocValues>> can be more efficient in many cases. For `[Int/Long/Float/Double/Date]PointFields`, `docValues=true` is required.
-8. [[fpbuc_8,8]] Stored content will be used by default, but docValues can alternatively be used. See <<docvalues.adoc#,DocValues>>.
+6. [[fpbuc_6,6]] Term vectors are not mandatory here.
+If not true, then a stored field is analyzed.
+So term vectors are recommended, but only required if `stored=false`.
+7. [[fpbuc_7,7]] For most field types, either `indexed` or `docValues` must be true, but both are not required.
+<<docvalues.adoc#,DocValues>> can be more efficient in many cases.
+For `[Int/Long/Float/Double/Date]PointFields`, `docValues=true` is required.
+8. [[fpbuc_8,8]] Stored content will be used by default, but docValues can alternatively be used.
+See <<docvalues.adoc#,DocValues>>.
 9. [[fpbuc_9,9]] Multi-valued sorting may be performed on docValues-enabled fields using the two-argument `field()` function, e.g., `field(myfield,min)`; see the <<function-queries.adoc#field-function,field() function in Function Queries>>.
diff --git a/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc b/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc
index 92a85eb..a7f5533 100644
--- a/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc
+++ b/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc
@@ -25,9 +25,13 @@ A field type definition can include four types of information:
 * If the field type is `TextField`, a description of the field analysis for the field type.
 * Field type properties - depending on the implementation class, some properties may be mandatory.
 
-== Field Type Definitions in schema.xml
+== Field Type Definitions in the Schema
 
-Field types are defined in `schema.xml`. Each field type is defined between `fieldType` elements. They can optionally be grouped within a `types` element. Here is an example of a field type definition for a type called `text_general`:
+Field types are defined in the collection's <<solr-schema.adoc#,schema>>.
+Each field type is defined between `fieldType` elements.
+They can optionally be grouped within a `types` element.
+
+Here is an example of a field type definition for a type called `text_general`:
 
 [source,xml,subs="verbatim,callouts"]
 ----
@@ -52,11 +56,14 @@ Field types are defined in `schema.xml`. Each field type is defined between `fie
 <1> The first line in the example above contains the field type name, `text_general`, and the name of the implementing class, `solr.TextField`.
 <2> The rest of the definition is about field analysis, described in <<document-analysis.adoc#,Document Analysis in Solr>>.
 
-The implementing class is responsible for making sure the field is handled correctly. In the class names in `schema.xml`, the string `solr` is shorthand for `org.apache.solr.schema` or `org.apache.solr.analysis`. Therefore, `solr.TextField` is really `org.apache.solr.schema.TextField`.
+The implementing class is responsible for making sure the field is handled correctly.
+In the class names in `schema.xml`, the string `solr` is shorthand for `org.apache.solr.schema` or `org.apache.solr.analysis`.
+Therefore, `solr.TextField` is really `org.apache.solr.schema.TextField`.
 
 == Field Type Properties
 
-The field type `class` determines most of the behavior of a field type, but optional properties can also be defined. For example, the following definition of a date field type defines two properties, `sortMissingLast` and `omitNorms`.
+The field type `class` determines most of the behavior of a field type, but optional properties can also be defined.
+For example, the following definition of a date field type defines two properties, `sortMissingLast` and `omitNorms`.
 
 [source,xml]
 ----
@@ -75,48 +82,65 @@ The properties that can be specified for a given field type fall into three majo
 These are the general properties for fields:
 
 `name`::
-The name of the fieldType. This value gets used in field definitions, in the "type" attribute. It is strongly recommended that names consist of alphanumeric or underscore characters only and not start with a digit. This is not currently strictly enforced.
+The name of the fieldType.
+This value gets used in field definitions, in the "type" attribute.
+It is strongly recommended that names consist of alphanumeric or underscore characters only and not start with a digit.
+This is not currently strictly enforced.
 
 `class`::
-The class name that gets used to store and index the data for this type. Note that you may prefix included class names with "solr." and Solr will automatically figure out which packages to search for the class - so `solr.TextField` will work.
+The class name that gets used to store and index the data for this type.
+Note that you may prefix included class names with "solr." and Solr will automatically figure out which packages to search for the class - so `solr.TextField` will work.
 +
-If you are using a third-party class, you will probably need to have a fully qualified class name. The fully qualified equivalent for `solr.TextField` is `org.apache.solr.schema.TextField`.
+If you are using a third-party class, you will probably need to have a fully qualified class name.
+The fully qualified equivalent for `solr.TextField` is `org.apache.solr.schema.TextField`.
 
 `positionIncrementGap`::
 For multivalued fields, specifies a distance between multiple values, which prevents spurious phrase matches.
 
-`autoGeneratePhraseQueries`:: For text fields. If `true`, Solr automatically generates phrase queries for adjacent terms. If `false`, terms must be enclosed in double-quotes to be treated as phrases.
+`autoGeneratePhraseQueries`:: For text fields.
+If `true`, Solr automatically generates phrase queries for adjacent terms.
+If `false`, terms must be enclosed in double-quotes to be treated as phrases.
 
 `synonymQueryStyle`::
-Query used to combine scores of overlapping query terms (i.e., synonyms). Consider a search for "blue tee" with query-time synonyms `tshirt,tee`.
+Query used to combine scores of overlapping query terms (i.e., synonyms).
+Consider a search for "blue tee" with query-time synonyms `tshirt,tee`.
 +
-Use `as_same_term` (default) to blend terms, i.e., `SynonymQuery(tshirt,tee)` where each term will be treated as equally important. Use `pick_best` to select the most significant synonym when scoring `Dismax(tee,tshirt)`. Use `as_distinct_terms` to bias scoring towards the most significant synonym `(pants OR slacks)`.
+Use `as_same_term` (default) to blend terms, i.e., `SynonymQuery(tshirt,tee)` where each term will be treated as equally important.
+Use `pick_best` to select the most significant synonym when scoring `Dismax(tee,tshirt)`.
+Use `as_distinct_terms` to bias scoring towards the most significant synonym `(pants OR slacks)`.
 +
-`as_same_term` is appropriate when terms are true synonyms (television, tv). Use `pick_best` or `as_distinct_terms` when synonyms are expanding to hyponyms `(q=jeans w/ jeans=>jeans,pants)` and you want exact to come before parent and sibling concepts. See this http://opensourceconnections.com/blog/2017/11/21/solr-synonyms-mea-culpa/[blog article].
+`as_same_term` is appropriate when terms are true synonyms (television, tv).
+Use `pick_best` or `as_distinct_terms` when synonyms are expanding to hyponyms `(q=jeans w/ jeans=>jeans,pants)` and you want exact to come before parent and sibling concepts.
+See this http://opensourceconnections.com/blog/2017/11/21/solr-synonyms-mea-culpa/[blog article].
 
 `enableGraphQueries`::
-For text fields, applicable when querying with <<standard-query-parser.adoc#standard-query-parser-parameters,`sow=false`>> (which is the default for the `sow` parameter). Use `true`, the default, for field types with query analyzers including graph-aware filters, e.g., <<filters.adoc#synonym-graph-filter,Synonym Graph Filter>> and <<filters.adoc#word-delimiter-graph-filter,Word Delimiter Graph Filter>>.
+For text fields, applicable when querying with <<standard-query-parser.adoc#standard-query-parser-parameters,`sow=false`>> (which is the default for the `sow` parameter).
+Use `true`, the default, for field types with query analyzers including graph-aware filters, e.g., <<filters.adoc#synonym-graph-filter,Synonym Graph Filter>> and <<filters.adoc#word-delimiter-graph-filter,Word Delimiter Graph Filter>>.
 +
 Use `false` for field types with query analyzers including filters that can match docs when some tokens are missing, e.g., <<filters.adoc#shingle-filter,Shingle Filter>>.
 
 [[docvaluesformat]]
 `docValuesFormat`::
-Defines a custom `DocValuesFormat` to use for fields of this type. This requires that a schema-aware codec, such as the `SchemaCodecFactory`, has been configured in `solrconfig.xml`.
+Defines a custom `DocValuesFormat` to use for fields of this type.
+This requires that a schema-aware codec, such as the `SchemaCodecFactory`, has been configured in `solrconfig.xml`.
 
 `postingsFormat`::
-Defines a custom `PostingsFormat` to use for fields of this type. This requires that a schema-aware codec, such as the `SchemaCodecFactory`, has been configured in `solrconfig.xml`.
+Defines a custom `PostingsFormat` to use for fields of this type.
+This requires that a schema-aware codec, such as the `SchemaCodecFactory`, has been configured in `solrconfig.xml`.
 
 
 [NOTE]
 ====
-Lucene index back-compatibility is only supported for the default codec. If you choose to customize the `postingsFormat` or `docValuesFormat` in your `schema.xml`, upgrading to a future version of Solr may require you to either switch back to the default codec and optimize your index to rewrite it into the default codec before upgrading, or re-build your entire index from scratch after upgrading.
+Lucene index back-compatibility is only supported for the default codec.
+If you choose to customize the `postingsFormat` or `docValuesFormat` in your `schema.xml`, upgrading to a future version of Solr may require you to either switch back to the default codec and optimize your index to rewrite it into the default codec before upgrading, or re-build your entire index from scratch after upgrading.
 ====
 
 === Field Default Properties
 
 These are properties that can be specified either on the field types, or on individual fields to override the values provided by the field types.
 
-The default values for each property depend on the underlying `FieldType` class, which in turn may depend on the `version` attribute of the `<schema/>`. The table below includes the default value for most `FieldType` implementations provided by Solr, assuming a `schema.xml` that declares `version="1.6"`.
+The default values for each property depend on the underlying `FieldType` class, which in turn may depend on the `version` attribute of the `<schema/>`.
+The table below includes the default value for most `FieldType` implementations provided by Solr, assuming a `schema.xml` that declares `version="1.6"`.
 
 // TODO: SOLR-10655 BEGIN: refactor this into a 'field-default-properties.include.adoc' file for reuse
 
@@ -142,19 +166,27 @@ The default values for each property depend on the underlying `FieldType` class,
 
 == Choosing Appropriate Numeric Types
 
-For general numeric needs, consider using one of the `IntPointField`, `LongPointField`, `FloatPointField`, or `DoublePointField` classes, depending on the specific values you expect. These "Dimensional Point" based numeric classes use specially encoded data structures to support efficient range queries regardless of the size of the ranges used. Enable <<docvalues.adoc#,DocValues>> on these fields as needed for sorting and/or faceting.
+For general numeric needs, consider using one of the `IntPointField`, `LongPointField`, `FloatPointField`, or `DoublePointField` classes, depending on the specific values you expect.
+These "Dimensional Point" based numeric classes use specially encoded data structures to support efficient range queries regardless of the size of the ranges used.
+Enable <<docvalues.adoc#,DocValues>> on these fields as needed for sorting and/or faceting.
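+
+As a sketch (the type and field names below are hypothetical), a point-based numeric field with docValues enabled might be declared like this:
+
+[source,xml]
+----
+<!-- hypothetical names; the other *PointField classes are declared the same way -->
+<fieldType name="plong" class="solr.LongPointField" docValues="true"/>
+<field name="unit_count" type="plong" indexed="true" stored="true"/>
+----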
 
-Some Solr features may not yet work with "Dimensional Points", in which case you may want to consider the equivalent `TrieIntField`, `TrieLongField`, `TrieFloatField`, and `TrieDoubleField` classes. These field types are deprecated and are likely to be removed in a future major Solr release, but they can still be used if necessary. Configure a `precisionStep="0"` if you wish to minimize index size, but if you expect users to make frequent range queries on numeric types, use the default ` [...]
+Some Solr features may not yet work with "Dimensional Points", in which case you may want to consider the equivalent `TrieIntField`, `TrieLongField`, `TrieFloatField`, and `TrieDoubleField` classes.
+These field types are deprecated and are likely to be removed in a future major Solr release, but they can still be used if necessary.
+Configure a `precisionStep="0"` if you wish to minimize index size, but if you expect users to make frequent range queries on numeric types, use the default `precisionStep` (by not specifying it) or specify it as `precisionStep="8"` (which is the default).
+This offers faster speed for range queries at the expense of increasing index size.
 
 == Working With Text
 
 Handling text properly will make your users happy by providing them with the best possible results for text searches.
 
-One technique is using a text field as a catch-all for keyword searching. Most users are not sophisticated about their searches and the most common search is likely to be a simple keyword search. You can use `copyField` to take a variety of fields and funnel them all into a single text field for keyword searches.
+One technique is using a text field as a catch-all for keyword searching.
+Most users are not sophisticated about their searches and the most common search is likely to be a simple keyword search.
+You can use `copyField` to take a variety of fields and funnel them all into a single text field for keyword searches.
 
 In the `schema.xml` file for the "```techproducts```" example included with Solr, `copyField` declarations are used to dump the contents of `cat`, `name`, `manu`, `features`, and `includes` into a single field, `text`. In addition, it could be a good idea to copy `ID` into `text` in case users wanted to search for a particular product by passing its product number to a keyword search.
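+
+A sketch of what such declarations look like (the actual `techproducts` schema may differ in details):
+
+[source,xml]
+----
+<copyField source="cat" dest="text"/>
+<copyField source="name" dest="text"/>
+<copyField source="manu" dest="text"/>
+<copyField source="features" dest="text"/>
+<copyField source="includes" dest="text"/>
+----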
 
-Another technique is using `copyField` to use the same field in different ways. Suppose you have a field that is a list of authors, like this:
+Another technique is using `copyField` to use the same field in different ways.
+Suppose you have a field that is a list of authors, like this:
 
 `Schildt, Herbert; Wolpert, Lewis; Davies, P.`
 
diff --git a/solr/solr-ref-guide/src/filters.adoc b/solr/solr-ref-guide/src/filters.adoc
index eb72764..83a153e 100644
--- a/solr/solr-ref-guide/src/filters.adoc
+++ b/solr/solr-ref-guide/src/filters.adoc
@@ -191,24 +191,31 @@ This filter converts characters from the following Unicode blocks:
 
 == Beider-Morse Filter
 
-Implements the Beider-Morse Phonetic Matching (BMPM) algorithm, which allows identification of similar names, even if they are spelled differently or in different languages. More information about how this works is available in the section on <<phonetic-matching.adoc#beider-morse-phonetic-matching-bmpm,Phonetic Matching>>.
+Implements the Beider-Morse Phonetic Matching (BMPM) algorithm, which allows identification of similar names, even if they are spelled differently or in different languages.
+More information about how this works is available in the section on <<phonetic-matching.adoc#beider-morse-phonetic-matching-bmpm,Phonetic Matching>>.
 
 [IMPORTANT]
 ====
-BeiderMorseFilter changed its behavior in Solr 5.0 due to an update to version 3.04 of the BMPM algorithm. Older version of Solr implemented BMPM version 3.00 (see http://stevemorse.org/phoneticinfo.htm). Any index built using this filter with earlier versions of Solr will need to be rebuilt.
+BeiderMorseFilter changed its behavior in Solr 5.0 due to an update to version 3.04 of the BMPM algorithm.
+Older versions of Solr implemented BMPM version 3.00 (see http://stevemorse.org/phoneticinfo.htm).
+Any index built using this filter with earlier versions of Solr will need to be rebuilt.
 ====
 
 *Factory class:* `solr.BeiderMorseFilterFactory`
 
 *Arguments:*
 
-`nameType`:: Types of names. Valid values are GENERIC, ASHKENAZI, or SEPHARDIC. If not processing Ashkenazi or Sephardic names, use GENERIC.
+`nameType`:: Types of names.
+Valid values are GENERIC, ASHKENAZI, or SEPHARDIC.
+If not processing Ashkenazi or Sephardic names, use GENERIC.
 
-`ruleType`:: Types of rules to apply. Valid values are APPROX or EXACT.
+`ruleType`:: Types of rules to apply.
+Valid values are APPROX or EXACT.
 
 `concat`:: Defines if multiple possible matches should be combined with a pipe ("|").
 
-`languageSet`:: The language set to use. The value "auto" will allow the Filter to identify the language, or a comma-separated list can be supplied.
+`languageSet`:: The language set to use.
+The value "auto" will allow the filter to identify the language, or a comma-separated list can be supplied.
 
 *Example:*
 
@@ -284,7 +291,9 @@ This filter takes the output of the <<tokenizers.adoc#classic-tokenizer,Classic
 
 == Common Grams Filter
 
-This filter for use in `index` time analysis creates word shingles by combining common tokens such as stop words with regular tokens.  This can result in an index with more unique terms, but is useful for creating phrase queries containing common words, such as "the cat", in a way that will typically be much faster then if the combined tokens are not used, because only the term positions of documents containg both terms in sequence have to be considered.  Correct usage requires being pai [...]
+This filter, intended for use in `index` time analysis, creates word shingles by combining common tokens such as stop words with regular tokens.
+This can result in an index with more unique terms, but it is useful for creating phrase queries containing common words, such as "the cat", in a way that will typically be much faster than if the combined tokens are not used, because only the term positions of documents containing both terms in sequence have to be considered.
+Correct usage requires being paired with <<#common-grams-query-filter,Common Grams Query Filter>> during `query` analysis.
 
 These filters can also be combined with <<#stop-filter,Stop Filter>> so searching for `"the cat"` would match different documents than `"a cat"`, while pathological searches for either `"the"` or `"a"` would not match any documents.
 
@@ -296,7 +305,8 @@ These filters can also be combined with <<#stop-filter,Stop Filter>> so searchin
 
 `format`:: (optional) If the stopwords list has been formatted for Snowball, you can specify `format="snowball"` so Solr can read the stopwords file.
 
-`ignoreCase`:: (boolean) If true, the filter ignores the case of words when comparing them to the common word file. The default is false.
+`ignoreCase`:: (boolean) If true, the filter ignores the case of words when comparing them to the common word file.
+The default is false.
 
 *Example:*
 
@@ -348,17 +358,22 @@ This filter is used for the `query` time analysis aspect of <<#common-grams-filt
 
 == Collation Key Filter
 
-Collation allows sorting of text in a language-sensitive way. It is usually used for sorting, but can also be used with advanced searches. We've covered this in much more detail in the section on <<language-analysis.adoc#unicode-collation,Unicode Collation>>.
+Collation allows sorting of text in a language-sensitive way.
+It is usually used for sorting, but can also be used with advanced searches.
+We've covered this in much more detail in the section on <<language-analysis.adoc#unicode-collation,Unicode Collation>>.
 
 == Daitch-Mokotoff Soundex Filter
 
-Implements the Daitch-Mokotoff Soundex algorithm, which allows identification of similar names, even if they are spelled differently. More information about how this works is available in the section on <<phonetic-matching.adoc#,Phonetic Matching>>.
+Implements the Daitch-Mokotoff Soundex algorithm, which allows identification of similar names, even if they are spelled differently.
+More information about how this works is available in the section on <<phonetic-matching.adoc#,Phonetic Matching>>.
 
 *Factory class:* `solr.DaitchMokotoffSoundexFilterFactory`
 
 *Arguments:*
 
-`inject`:: (true/false) If true (the default), then new phonetic tokens are added to the stream. Otherwise, tokens are replaced with the phonetic equivalent. Setting this to false will enable phonetic matching, but the exact spelling of the target word may not match.
+`inject`:: (true/false) If true (the default), then new phonetic tokens are added to the stream.
+Otherwise, tokens are replaced with the phonetic equivalent.
+Setting this to false will enable phonetic matching, but the exact spelling of the target word may not match.
 
 *Example:*
 
@@ -390,13 +405,16 @@ Implements the Daitch-Mokotoff Soundex algorithm, which allows identification of
 
 == Double Metaphone Filter
 
-This filter creates tokens using the http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/commons/codec/language/DoubleMetaphone.html[`DoubleMetaphone`] encoding algorithm from commons-codec. For more information, see the <<phonetic-matching.adoc#,Phonetic Matching>> section.
+This filter creates tokens using the http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/commons/codec/language/DoubleMetaphone.html[`DoubleMetaphone`] encoding algorithm from commons-codec.
+For more information, see the <<phonetic-matching.adoc#,Phonetic Matching>> section.
 
 *Factory class:* `solr.DoubleMetaphoneFilterFactory`
 
 *Arguments:*
 
-`inject`:: (true/false) If true (the default), then new phonetic tokens are added to the stream. Otherwise, tokens are replaced with the phonetic equivalent. Setting this to false will enable phonetic matching, but the exact spelling of the target word may not match.
+`inject`:: (true/false) If true (the default), then new phonetic tokens are added to the stream.
+Otherwise, tokens are replaced with the phonetic equivalent.
+Setting this to false will enable phonetic matching, but the exact spelling of the target word may not match.
 
 `maxCodeLength`:: (integer) The maximum length of the code to be generated.
 
@@ -436,7 +454,8 @@ Default behavior for inject (true): keep the original token and add phonetic tok
 
 *Out:* "four"(1), "FR"(1), "score"(2), "SKR"(2), "and"(3), "ANT"(3), "Kuczewski"(4), "KSSK"(4), "KXFS"(4)
 
-The phonetic tokens have a position increment of 0, which indicates that they are at the same position as the token they were derived from (immediately preceding). Note that "Kuczewski" has two encodings, which are added at the same position.
+The phonetic tokens have a position increment of 0, which indicates that they are at the same position as the token they were derived from (immediately preceding).
+Note that "Kuczewski" has two encodings, which are added at the same position.
 
 *Example:*
 
@@ -466,7 +485,8 @@ This filter adds a numeric floating point boost value to tokens, splitting on a
 
 *Arguments:*
 
-`delimiter`:: The character used to separate the token and the boost. Defaults to '|'.
+`delimiter`:: The character used to separate the token and the boost.
+Defaults to '|'.
 
 *Example:*
 
@@ -672,7 +692,8 @@ This filter stems plural English words to their singular form.
 
 == English Possessive Filter
 
-This filter removes singular possessives (trailing *'s*) from words. Note that plural possessives, e.g., the *s'* in "divers' snorkels", are not removed by this filter.
+This filter removes singular possessives (trailing *'s*) from words.
+Note that plural possessives, e.g., the *s'* in "divers' snorkels", are not removed by this filter.
 
 *Factory class:* `solr.EnglishPossessiveFilterFactory`
 
@@ -714,15 +735,19 @@ This filter removes singular possessives (trailing *'s*) from words. Note that p
 
 == Fingerprint Filter
 
-This filter outputs a single token which is a concatenation of the sorted and de-duplicated set of input tokens. This can be useful for clustering/linking use cases.
+This filter outputs a single token which is a concatenation of the sorted and de-duplicated set of input tokens.
+This can be useful for clustering/linking use cases.
 
 *Factory class:* `solr.FingerprintFilterFactory`
 
 *Arguments:*
 
-`separator`:: The character used to separate tokens combined into the single output token. Defaults to " " (a space character).
+`separator`:: The character used to separate tokens combined into the single output token.
+Defaults to " " (a space character).
 
-`maxOutputTokenSize`:: The maximum length of the summarized output token. If exceeded, no output token is emitted. Defaults to 1024.
+`maxOutputTokenSize`:: The maximum length of the summarized output token.
+If exceeded, no output token is emitted.
+Defaults to 1024.
 
 *Example:*
 
@@ -770,9 +795,13 @@ See the examples below for <<Synonym Graph Filter>> and <<Word Delimiter Graph F
 
 == Hunspell Stem Filter
 
-The `Hunspell Stem Filter` provides support for several languages. You must provide the dictionary (`.dic`) and rules (`.aff`) files for each language you wish to use with the Hunspell Stem Filter. You can download those language files http://wiki.services.openoffice.org/wiki/Dictionaries[here].
+The `Hunspell Stem Filter` provides support for several languages.
+You must provide the dictionary (`.dic`) and rules (`.aff`) files for each language you wish to use with the Hunspell Stem Filter.
+You can download those language files http://wiki.services.openoffice.org/wiki/Dictionaries[here].
 
-Be aware that your results will vary widely based on the quality of the provided dictionary and rules files. For example, some languages have only a minimal word list with no morphological information. On the other hand, for languages that have no stemmer but do have an extensive dictionary file, the Hunspell stemmer may be a good choice.
+Be aware that your results will vary widely based on the quality of the provided dictionary and rules files.
+For example, some languages have only a minimal word list with no morphological information.
+On the other hand, for languages that have no stemmer but do have an extensive dictionary file, the Hunspell stemmer may be a good choice.
 
 *Factory class:* `solr.HunspellStemFilterFactory`
 
@@ -782,9 +811,12 @@ Be aware that your results will vary widely based on the quality of the provided
 
 `affix`:: (required) The path of a rules file.
 
-`ignoreCase`:: (boolean) controls whether matching is case sensitive or not. The default is false.
+`ignoreCase`:: (boolean) Controls whether matching is case sensitive or not.
+The default is false.
 
-`strictAffixParsing`:: (boolean) controls whether the affix parsing is strict or not. If true, an error while reading an affix rule causes a ParseException, otherwise is ignored. The default is true.
+`strictAffixParsing`:: (boolean) Controls whether the affix parsing is strict or not.
+If true, an error while reading an affix rule causes a ParseException; otherwise it is ignored.
+The default is true.
 
 *Example:*
 
@@ -830,9 +862,11 @@ Be aware that your results will vary widely based on the quality of the provided
 
 == Hyphenated Words Filter
 
-This filter reconstructs hyphenated words that have been tokenized as two tokens because of a line break or other intervening whitespace in the field test. If a token ends with a hyphen, it is joined with the following token and the hyphen is discarded.
+This filter reconstructs hyphenated words that have been tokenized as two tokens because of a line break or other intervening whitespace in the field text.
+If a token ends with a hyphen, it is joined with the following token and the hyphen is discarded.
 
-Note that for this filter to work properly, the upstream tokenizer must not remove trailing hyphen characters. This filter is generally only useful at index time.
+Note that for this filter to work properly, the upstream tokenizer must not remove trailing hyphen characters.
+This filter is generally only useful at index time.
 
 *Factory class:* `solr.HyphenatedWordsFilterFactory`
 
@@ -876,13 +910,15 @@ Note that for this filter to work properly, the upstream tokenizer must not remo
 
 This filter is a custom Unicode normalization form that applies the foldings specified in http://www.unicode.org/reports/tr30/tr30-4.html[Unicode TR #30: Character Foldings] in addition to the `NFKC_Casefold` normalization form as described in <<ICU Normalizer 2 Filter>>. This filter is a better substitute for the combined behavior of the <<ASCII Folding Filter>>, <<Lower Case Filter>>, and <<ICU Normalizer 2 Filter>>.
 
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 *Factory class:* `solr.ICUFoldingFilterFactory`
 
 *Arguments:*
 
-`filter`:: (string, optional) A Unicode set filter that can be used to e.g., exclude a set of characters from being processed. See the http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet javadocs] for more information.
+`filter`:: (string, optional) A Unicode set filter that can be used to e.g., exclude a set of characters from being processed.
+See the http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet javadocs] for more information.
 
 *Example without a filter:*
 
@@ -932,17 +968,24 @@ This filter factory normalizes text according to one of five Unicode Normalizati
 * NFD: (`name="nfc" mode="decompose"`) Normalization Form D, canonical decomposition
 * NFKC: (`name="nfkc" mode="compose"`) Normalization Form KC, compatibility decomposition, followed by canonical composition
 * NFKD: (`name="nfkc" mode="decompose"`) Normalization Form KD, compatibility decomposition
-* NFKC_Casefold: (`name="nfkc_cf" mode="compose"`) Normalization Form KC, with additional Unicode case folding. Using the ICU Normalizer 2 Filter is a better-performing substitution for the <<Lower Case Filter>> and NFKC normalization.
+* NFKC_Casefold: (`name="nfkc_cf" mode="compose"`) Normalization Form KC, with additional Unicode case folding.
+Using the ICU Normalizer 2 Filter is a better-performing substitution for the <<Lower Case Filter>> and NFKC normalization.
 
 *Factory class:* `solr.ICUNormalizer2FilterFactory`
 
 *Arguments:*
 
-`form`:: The name of the normalization form. Valid options are `nfc`, `nfd`, `nfkc`, `nfkd`, or `nfkc_cf` (the default). Required.
+`form`:: The name of the normalization form.
+Valid options are `nfc`, `nfd`, `nfkc`, `nfkd`, or `nfkc_cf` (the default).
+Required.
 
-`mode`:: The mode of Unicode character composition and decomposition. Valid options are: `compose` (the default) or `decompose`. Required.
+`mode`:: The mode of Unicode character composition and decomposition.
+Valid options are: `compose` (the default) or `decompose`.
+Required.
 
-`filter`:: A Unicode set filter that can be used to e.g., exclude a set of characters from being processed. See the http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet javadocs] for more information. Optional.
+`filter`:: A Unicode set filter that can be used to e.g., exclude a set of characters from being processed.
+See the http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet javadocs] for more information.
+Optional.
 
 *Example with NFKC_Casefold:*
 
@@ -984,17 +1027,21 @@ This filter factory normalizes text according to one of five Unicode Normalizati
 
 For detailed information about these normalization forms, see http://unicode.org/reports/tr15/[Unicode Normalization Forms].
 
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 == ICU Transform Filter
 
-This filter applies http://userguide.icu-project.org/transforms/general[ICU Tranforms] to text. This filter supports only ICU System Transforms. Custom rule sets are not supported.
+This filter applies http://userguide.icu-project.org/transforms/general[ICU Transforms] to text.
+This filter supports only ICU System Transforms.
+Custom rule sets are not supported.
 
 *Factory class:* `solr.ICUTransformFilterFactory`
 
 *Arguments:*
 
-`id`:: (string) The identifier for the ICU System Transform you wish to apply with this filter. For a full list of ICU System Transforms, see http://demo.icu-project.org/icu-bin/translit?TEMPLATE_FILE=data/translit_rule_main.html.
+`id`:: (string) The identifier for the ICU System Transform you wish to apply with this filter.
+For a full list of ICU System Transforms, see http://demo.icu-project.org/icu-bin/translit?TEMPLATE_FILE=data/translit_rule_main.html.
 
 *Example:*
 
@@ -1026,21 +1073,29 @@ This filter applies http://userguide.icu-project.org/transforms/general[ICU Tran
 
 For detailed information about ICU Transforms, see http://userguide.icu-project.org/transforms/general.
 
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 == Keep Word Filter
 
-This filter discards all tokens except those that are listed in the given word list. This is the inverse of the Stop Words Filter. This filter can be useful for building specialized indices for a constrained set of terms.
+This filter discards all tokens except those that are listed in the given word list.
+This is the inverse of the Stop Words Filter.
+This filter can be useful for building specialized indices for a constrained set of terms.
 
 *Factory class:* `solr.KeepWordFilterFactory`
 
 *Arguments:*
 
-`words`:: (required) Path of a text file containing the list of keep words, one per line. Blank lines and lines that begin with "#" are ignored. This may be an absolute path, or a simple filename in the Solr `conf` directory.
+`words`:: (required) Path of a text file containing the list of keep words, one per line.
+Blank lines and lines that begin with "#" are ignored.
+This may be an absolute path, or a simple filename in the Solr `conf` directory.
 
-`ignoreCase`:: (true/false) If *true* then comparisons are done case-insensitively. If this argument is true, then the words file is assumed to contain only lowercase words. The default is *false*.
+`ignoreCase`:: (true/false) If *true* then comparisons are done case-insensitively.
+If this argument is true, then the words file is assumed to contain only lowercase words.
+The default is *false*.
 
-`enablePositionIncrements`:: if `luceneMatchVersion` is `4.3` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens. *This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
+`enablePositionIncrements`:: If `luceneMatchVersion` is `4.3` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens.
+*This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
 
 *Example:*
 
@@ -1121,7 +1176,9 @@ Using LowerCaseFilterFactory before filtering for keep words, no `ignoreCase` fl
 
 == KStem Filter
 
-KStem is an alternative to the Porter Stem Filter for developers looking for a less aggressive stemmer. KStem was written by Bob Krovetz, ported to Lucene by Sergio Guzman-Lara (UMASS Amherst). This stemmer is only appropriate for English language text.
+KStem is an alternative to the Porter Stem Filter for developers looking for a less aggressive stemmer.
+KStem was written by Bob Krovetz, ported to Lucene by Sergio Guzman-Lara (UMASS Amherst).
+This stemmer is only appropriate for English language text.
 
 *Factory class:* `solr.KStemFilterFactory`
 
@@ -1163,17 +1220,21 @@ KStem is an alternative to the Porter Stem Filter for developers looking for a l
 
 == Length Filter
 
-This filter passes tokens whose length falls within the min/max limit specified. All other tokens are discarded.
+This filter passes tokens whose length falls within the min/max limit specified.
+All other tokens are discarded.
 
 *Factory class:* `solr.LengthFilterFactory`
 
 *Arguments:*
 
-`min`:: (integer, required) Minimum token length. Tokens shorter than this are discarded.
+`min`:: (integer, required) Minimum token length.
+Tokens shorter than this are discarded.
 
-`max`:: (integer, required, must be >= min) Maximum token length. Tokens longer than this are discarded.
+`max`:: (integer, required, must be >= min) Maximum token length.
+Tokens longer than this are discarded.
 
-`enablePositionIncrements`:: if `luceneMatchVersion` is `4.3` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens. *This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
+`enablePositionIncrements`:: If `luceneMatchVersion` is `4.3` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens.
+*This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
 
 *Example:*
 
@@ -1213,15 +1274,19 @@ This filter passes tokens whose length falls within the min/max limit specified.
 
 This filter limits the number of accepted tokens, typically useful for index analysis.
 
-By default, this filter ignores any tokens in the wrapped `TokenStream` once the limit has been reached, which can result in `reset()` being called prior to `incrementToken()` returning `false`. For most `TokenStream` implementations this should be acceptable, and faster then consuming the full stream. If you are wrapping a `TokenStream` which requires that the full stream of tokens be exhausted in order to function properly, use the `consumeAllTokens="true"` option.
+By default, this filter ignores any tokens in the wrapped `TokenStream` once the limit has been reached, which can result in `reset()` being called prior to `incrementToken()` returning `false`.
+For most `TokenStream` implementations this should be acceptable, and faster than consuming the full stream.
+If you are wrapping a `TokenStream` which requires that the full stream of tokens be exhausted in order to function properly, use the `consumeAllTokens="true"` option.
 
 *Factory class:* `solr.LimitTokenCountFilterFactory`
 
 *Arguments:*
 
-`maxTokenCount`:: (integer, required) Maximum token count. After this limit has been reached, tokens are discarded.
+`maxTokenCount`:: (integer, required) Maximum token count.
+After this limit has been reached, tokens are discarded.
 
-`consumeAllTokens`:: (boolean, defaults to false) Whether to consume (and discard) previous token filters' tokens after the maximum token count has been reached. See description above.
+`consumeAllTokens`:: (boolean, defaults to false) Whether to consume (and discard) previous token filters' tokens after the maximum token count has been reached.
+See description above.
 
 *Example:*
 
@@ -1261,17 +1326,22 @@ By default, this filter ignores any tokens in the wrapped `TokenStream` once the
 
 == Limit Token Offset Filter
 
-This filter limits tokens to those before a configured maximum start character offset. This can be useful to limit highlighting, for example.
+This filter limits tokens to those before a configured maximum start character offset.
+This can be useful to limit highlighting, for example.
 
-By default, this filter ignores any tokens in the wrapped `TokenStream` once the limit has been reached, which can result in `reset()` being called prior to `incrementToken()` returning `false`. For most `TokenStream` implementations this should be acceptable, and faster then consuming the full stream. If you are wrapping a `TokenStream` which requires that the full stream of tokens be exhausted in order to function properly, use the `consumeAllTokens="true"` option.
+By default, this filter ignores any tokens in the wrapped `TokenStream` once the limit has been reached, which can result in `reset()` being called prior to `incrementToken()` returning `false`.
+For most `TokenStream` implementations this should be acceptable, and faster than consuming the full stream.
+If you are wrapping a `TokenStream` which requires that the full stream of tokens be exhausted in order to function properly, use the `consumeAllTokens="true"` option.
 
 *Factory class:* `solr.LimitTokenOffsetFilterFactory`
 
 *Arguments:*
 
-`maxStartOffset`:: (integer, required) Maximum token start character offset. After this limit has been reached, tokens are discarded.
+`maxStartOffset`:: (integer, required) Maximum token start character offset.
+After this limit has been reached, tokens are discarded.
 
-`consumeAllTokens`:: (boolean, defaults to false) Whether to consume (and discard) previous token filters' tokens after the maximum start offset has been reached. See description above.
+`consumeAllTokens`:: (boolean, defaults to false) Whether to consume (and discard) previous token filters' tokens after the maximum start offset has been reached.
+See description above.
 
 *Example:*
 
@@ -1313,15 +1383,19 @@ By default, this filter ignores any tokens in the wrapped `TokenStream` once the
 
 This filter limits tokens to those before a configured maximum token position.
 
-By default, this filter ignores any tokens in the wrapped `TokenStream` once the limit has been reached, which can result in `reset()` being called prior to `incrementToken()` returning `false`. For most `TokenStream` implementations this should be acceptable, and faster then consuming the full stream. If you are wrapping a `TokenStream` which requires that the full stream of tokens be exhausted in order to function properly, use the `consumeAllTokens="true"` option.
+By default, this filter ignores any tokens in the wrapped `TokenStream` once the limit has been reached, which can result in `reset()` being called prior to `incrementToken()` returning `false`.
+For most `TokenStream` implementations this should be acceptable, and faster than consuming the full stream.
+If you are wrapping a `TokenStream` which requires that the full stream of tokens be exhausted in order to function properly, use the `consumeAllTokens="true"` option.
 
 *Factory class:* `solr.LimitTokenPositionFilterFactory`
 
 *Arguments:*
 
-`maxTokenPosition`:: (integer, required) Maximum token position. After this limit has been reached, tokens are discarded.
+`maxTokenPosition`:: (integer, required) Maximum token position.
+After this limit has been reached, tokens are discarded.
 
-`consumeAllTokens`:: (boolean, defaults to false) Whether to consume (and discard) previous token filters' tokens after the maximum start offset has been reached. See description above.
+`consumeAllTokens`:: (boolean, defaults to false) Whether to consume (and discard) previous token filters' tokens after the maximum token position has been reached.
+See description above.
 
 *Example:*
 
@@ -1361,7 +1435,8 @@ By default, this filter ignores any tokens in the wrapped `TokenStream` once the
 
 == Lower Case Filter
 
-Converts any uppercase letters in a token to the equivalent lowercase token. All other characters are left unchanged.
+Converts any uppercase letters in a token to their lowercase equivalents.
+All other characters are left unchanged.
 
 *Factory class:* `solr.LowerCaseFilterFactory`
 
@@ -1459,7 +1534,8 @@ For arguments and examples, see the <<Synonym Graph Filter>> below.
 
 This is specialized version of the <<Synonym Graph Filter>> that uses a mapping on synonyms that is <<managed-resources.adoc#,managed from a REST API.>>
 
-This filter maps single- or multi-token synonyms, producing a fully correct graph output. This filter is a replacement for the Managed Synonym Filter, which produces incorrect graphs for multi-token synonyms.
+This filter maps single- or multi-token synonyms, producing a fully correct graph output.
+This filter is a replacement for the Managed Synonym Filter, which produces incorrect graphs for multi-token synonyms.
 
 NOTE: Although this filter produces correct token graphs, it cannot consume an input token graph correctly.
 
@@ -1511,7 +1587,8 @@ See <<Synonym Graph Filter>> below for example input/output.
 
 == N-Gram Filter
 
-Generates n-gram tokens of sizes in the given range. Note that tokens are ordered by position and then by gram size.
+Generates n-gram tokens of sizes in the given range.
+Note that tokens are ordered by position and then by gram size.
 
 *Factory class:* `solr.NGramFilterFactory`
 
@@ -1615,7 +1692,8 @@ Preserve original term.
 
 == Numeric Payload Token Filter
 
-This filter adds a numeric floating point payload value to tokens that match a given type. Refer to the Javadoc for the `org.apache.lucene.analysis.Token` class for more information about token types and payloads.
+This filter adds a numeric floating point payload value to tokens that match a given type.
+Refer to the Javadoc for the `org.apache.lucene.analysis.Token` class for more information about token types and payloads.
 
 *Factory class:* `solr.NumericPayloadTokenFilterFactory`
 
@@ -1623,7 +1701,8 @@ This filter adds a numeric floating point payload value to tokens that match a g
 
 `payload`:: (required) A floating point value that will be added to all matching tokens.
 
-`typeMatch`:: (required) A token type name string. Tokens with a matching type name will have their payload set to the above floating point value.
+`typeMatch`:: (required) A token type name string.
+Tokens with a matching type name will have their payload set to the above floating point value.
 
 *Example:*
 
@@ -1661,7 +1740,8 @@ This filter adds a numeric floating point payload value to tokens that match a g
 
 == Pattern Replace Filter
 
-This filter applies a regular expression to each token and, for those that match, substitutes the given replacement string in place of the matched pattern. Tokens which do not match are passed though unchanged.
+This filter applies a regular expression to each token and, for those that match, substitutes the given replacement string in place of the matched pattern.
+Tokens which do not match are passed through unchanged.
 
 *Factory class:* `solr.PatternReplaceFilterFactory`
 
@@ -1669,7 +1749,9 @@ This filter applies a regular expression to each token and, for those that match
 
 `pattern`:: (required) The regular expression to test against each token, as per `java.util.regex.Pattern`.
 
-`replacement`:: (required) A string to substitute in place of the matched pattern. This string may contain references to capture groups in the regex pattern. See the Javadoc for `java.util.regex.Matcher`.
+`replacement`:: (required) A string to substitute in place of the matched pattern.
+This string may contain references to capture groups in the regex pattern.
+See the Javadoc for `java.util.regex.Matcher`.
 
 `replace`:: ("all" or "first", default "all") Indicates whether all occurrences of the pattern in the token should be replaced, or only the first.
 
@@ -1729,7 +1811,9 @@ String replacement, first occurrence only:
 
 *Example:*
 
-More complex pattern with capture group reference in the replacement. Tokens that start with non-numeric characters and end with digits will have an underscore inserted before the numbers. Otherwise the token is passed through.
+More complex pattern with capture group reference in the replacement.
+Tokens that start with non-numeric characters and end with digits will have an underscore inserted before the numbers.
+Otherwise the token is passed through.
 
 [source,xml]
 ----
@@ -1747,15 +1831,19 @@ More complex pattern with capture group reference in the replacement. Tokens tha
 
 == Phonetic Filter
 
-This filter creates tokens using one of the phonetic encoding algorithms in the `org.apache.commons.codec.language` package. For more information, see the section on <<phonetic-matching.adoc#,Phonetic Matching>>.
+This filter creates tokens using one of the phonetic encoding algorithms in the `org.apache.commons.codec.language` package.
+For more information, see the section on <<phonetic-matching.adoc#,Phonetic Matching>>.
 
 *Factory class:* `solr.PhoneticFilterFactory`
 
 *Arguments:*
 
-`encoder`:: (required) The name of the encoder to use. The encoder name must be one of the following (case insensitive): `http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/commons/codec/language/DoubleMetaphone.html[DoubleMetaphone]`, `http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/commons/codec/language/Metaphone.html[Metaphone]`, `http://commons.apache.org/proper/commons-codec/a [...]
+`encoder`:: (required) The name of the encoder to use.
+The encoder name must be one of the following (case insensitive): `http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/commons/codec/language/DoubleMetaphone.html[DoubleMetaphone]`, `http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/commons/codec/language/Metaphone.html[Metaphone]`, `http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/ [...]
 
-`inject`:: (true/false) If true (the default), then new phonetic tokens are added to the stream. Otherwise, tokens are replaced with the phonetic equivalent. Setting this to false will enable phonetic matching, but the exact spelling of the target word may not match.
+`inject`:: (true/false) If true (the default), then new phonetic tokens are added to the stream.
+Otherwise, tokens are replaced with the phonetic equivalent.
+Setting this to false will enable phonetic matching, but the exact spelling of the target word may not match.
 
 `maxCodeLength`:: (integer) The maximum length of the code to be generated by the Metaphone or Double Metaphone encoders.
 
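+For instance, a sketch using the DoubleMetaphone encoder with the original tokens kept alongside the phonetic ones (the tokenizer choice is illustrative):
+
+[source,xml]
+----
+<analyzer>
+  <tokenizer class="solr.StandardTokenizerFactory"/>
+  <filter class="solr.PhoneticFilterFactory" encoder="DoubleMetaphone" inject="true"/>
+</analyzer>
+----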
@@ -1835,7 +1923,11 @@ Default Soundex encoder.
 
 == Porter Stem Filter
 
-This filter applies the Porter Stemming Algorithm for English. The results are similar to using the Snowball Porter Stemmer with the `language="English"` argument. But this stemmer is coded directly in Java and is not based on Snowball. It does not accept a list of protected words and is only appropriate for English language text. However, it has been benchmarked as http://markmail.org/thread/d2c443z63z37rwf6[four times faster] than the English Snowball stemmer, so can provide a performa [...]
+This filter applies the Porter Stemming Algorithm for English.
+The results are similar to using the Snowball Porter Stemmer with the `language="English"` argument.
+But this stemmer is coded directly in Java and is not based on Snowball.
+It does not accept a list of protected words and is only appropriate for English language text.
+However, it has been benchmarked as http://markmail.org/thread/d2c443z63z37rwf6[four times faster] than the English Snowball stemmer, so it can provide a performance enhancement.
 
 *Factory class:* `solr.PorterStemFilterFactory`
 
@@ -1885,9 +1977,11 @@ This filter enables a form of conditional filtering: it only applies its wrapped
 
 `protected`:: (required) Comma-separated list of files containing protected terms, one per line.
 
-`wrappedFilters`:: (required) Case-insensitive comma-separated list of `TokenFilterFactory` SPI names (strip trailing `(Token)FilterFactory` from the factory name - see the https://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html[java.util.ServiceLoader interface]).  Each filter name must be unique, so if you need to specify the same filter more than once, you must add case-insensitive unique `-id` suffixes to each same-SPI-named filter (note that the `-id` suffix is stripp [...]
+`wrappedFilters`:: (required) Case-insensitive comma-separated list of `TokenFilterFactory` SPI names (strip trailing `(Token)FilterFactory` from the factory name - see the https://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html[java.util.ServiceLoader interface]).
+Each filter name must be unique, so if you need to specify the same filter more than once, you must add case-insensitive unique `-id` suffixes to each same-SPI-named filter (note that the `-id` suffix is stripped prior to SPI lookup).
 
-`ignoreCase`:: (true/false, default false) Ignore case when testing for protected words. If true, the protected list should contain lowercase words.
+`ignoreCase`:: (true/false, default false) Ignore case when testing for protected words.
+If true, the protected list should contain lowercase words.
 
 *Example:*
 
@@ -1927,7 +2021,8 @@ All terms except those in `protectedTerms.txt` are truncated at 4 characters and
 
 *Example:*
 
-This example includes multiple same-named wrapped filters with unique `-id` suffixes.  Note that both the filter SPI names and `-id` suffixes are treated case-insensitively.
+This example includes multiple same-named wrapped filters with unique `-id` suffixes.
+Note that both the filter SPI names and `-id` suffixes are treated case-insensitively.
 
 For all terms except those in `protectedTerms.txt`, synonyms are added, terms are reversed, and then synonyms are added for the reversed terms:
 
@@ -1945,9 +2040,12 @@ For all terms except those in `protectedTerms.txt`, synonyms are added, terms ar
 
 == Remove Duplicates Token Filter
 
-The filter removes duplicate tokens in the stream. Tokens are considered to be duplicates ONLY if they have the same text and position values.
+The filter removes duplicate tokens in the stream.
+Tokens are considered to be duplicates ONLY if they have the same text and position values.
 
-Because positions must be the same, this filter might not do what a user expects it to do based on its name. It is a very specialized filter that is only useful in very specific circumstances. It has been so named for brevity, even though it is potentially misleading.
+Because positions must be the same, this filter might not do what a user expects it to do based on its name.
+It is a very specialized filter that is only useful in very specific circumstances.
+It has been so named for brevity, even though it is potentially misleading.
 
 *Factory class:* `solr.RemoveDuplicatesTokenFilterFactory`
 
@@ -1955,7 +2053,8 @@ Because positions must be the same, this filter might not do what a user expects
 
 *Example:*
 
-One example of where `RemoveDuplicatesTokenFilterFactory` is useful in situations where a synonym file is being used in conjunction with a stemmer. In these situations, both the stemmer and the synonym filter can cause completely identical terms with the same positions to end up in the stream, increasing index size with no benefit.
+One example of where `RemoveDuplicatesTokenFilterFactory` is useful is in situations where a synonym file is being used in conjunction with a stemmer.
+In these situations, both the stemmer and the synonym filter can cause completely identical terms with the same positions to end up in the stream, increasing index size with no benefit.
 
 Consider the following entry from a `synonyms.txt` file:
 
@@ -2008,21 +2107,26 @@ When used in the following configuration:
 
 == Reversed Wildcard Filter
 
-This filter reverses tokens to provide faster leading wildcard and prefix queries. Tokens without wildcards are not reversed.
+This filter reverses tokens to provide faster leading wildcard and prefix queries.
+Tokens without wildcards are not reversed.
 
 *Factory class:* `solr.ReversedWildcardFilterFactory`
 
 *Arguments:*
 
-`withOriginal`:: (boolean) If true, the filter produces both original and reversed tokens at the same positions. If false, produces only reversed tokens.
+`withOriginal`:: (boolean) If true, the filter produces both original and reversed tokens at the same positions.
+If false, produces only reversed tokens.
 
-`maxPosAsterisk`:: (integer, default = 2) The maximum position of the asterisk wildcard ('*') that triggers the reversal of the query term. Terms with asterisks at positions above this value are not reversed.
+`maxPosAsterisk`:: (integer, default = 2) The maximum position of the asterisk wildcard ('*') that triggers the reversal of the query term.
+Terms with asterisks at positions above this value are not reversed.
 
-`maxPosQuestion`:: (integer, default = 1) The maximum position of the question mark wildcard ('?') that triggers the reversal of query term. To reverse only pure suffix queries (queries with a single leading asterisk), set this to 0 and `maxPosAsterisk` to 1.
+`maxPosQuestion`:: (integer, default = 1) The maximum position of the question mark wildcard ('?') that triggers the reversal of query term.
+To reverse only pure suffix queries (queries with a single leading asterisk), set this to 0 and `maxPosAsterisk` to 1.
 
 `maxFractionAsterisk`:: (float, default = 0.0) An additional parameter that triggers the reversal if asterisk ('*') position is less than this fraction of the query token length.
 
-`minTrailing`:: (integer, default = 2) The minimum number of trailing characters in a query token after the last wildcard character. For good performance this should be set to a value larger than 1.
+`minTrailing`:: (integer, default = 2) The minimum number of trailing characters in a query token after the last wildcard character.
+For good performance this should be set to a value larger than 1.
 
 *Example:*
 
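+A sketch of an index-time analyzer using the arguments described above (the specific values are illustrative, not prescriptive):
+
+[source,xml]
+----
+<analyzer type="index">
+  <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+  <!-- withOriginal="true": index both the original and the reversed form of each token. -->
+  <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
+          maxPosAsterisk="2" maxPosQuestion="1" minTrailing="2" maxFractionAsterisk="0"/>
+</analyzer>
+----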
@@ -2062,7 +2166,8 @@ This filter reverses tokens to provide faster leading wildcard and prefix querie
 
 == Shingle Filter
 
-This filter constructs shingles, which are token n-grams, from the token stream. It combines runs of tokens into a single token.
+This filter constructs shingles, which are token n-grams, from the token stream.
+It combines runs of tokens into a single token.
 
 *Factory class:* `solr.ShingleFilterFactory`
 
@@ -2134,9 +2239,13 @@ A shingle size of four, do not include original token.
 
 == Snowball Porter Stemmer Filter
 
-This filter factory instantiates a language-specific stemmer generated by Snowball. Snowball is a software package that generates pattern-based word stemmers. This type of stemmer is not as accurate as a table-based stemmer, but is faster and less complex. Table-driven stemmers are labor intensive to create and maintain and so are typically commercial products.
+This filter factory instantiates a language-specific stemmer generated by Snowball.
+Snowball is a software package that generates pattern-based word stemmers.
+This type of stemmer is not as accurate as a table-based stemmer, but is faster and less complex.
+Table-driven stemmers are labor intensive to create and maintain and so are typically commercial products.
 
-Solr contains Snowball stemmers for Armenian, Basque, Catalan, Danish, Dutch, English, Finnish, French, German, Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian, Spanish, Swedish and Turkish. For more information on Snowball, visit http://snowball.tartarus.org/.
+Solr contains Snowball stemmers for Armenian, Basque, Catalan, Danish, Dutch, English, Finnish, French, German, Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian, Spanish, Swedish and Turkish.
+For more information on Snowball, visit http://snowball.tartarus.org/.
 
 `StopFilterFactory`, `CommonGramsFilterFactory`, and `CommonGramsQueryFilterFactory` can optionally read stopwords in Snowball format (specify `format="snowball"` in the configuration of those FilterFactories).
 
@@ -2144,9 +2253,14 @@ Solr contains Snowball stemmers for Armenian, Basque, Catalan, Danish, Dutch, En
 
 *Arguments:*
 
-`language`:: (default "English") The name of a language, used to select the appropriate Porter stemmer to use. Case is significant. This string is used to select a package name in the `org.tartarus.snowball.ext` class hierarchy.
+`language`:: (default "English") The name of a language, used to select the appropriate Porter stemmer to use.
+Case is significant.
+This string is used to select a package name in the `org.tartarus.snowball.ext` class hierarchy.
 
-`protected`:: Path of a text file containing a list of protected words, one per line. Protected words will not be stemmed. Blank lines and lines that begin with "#" are ignored. This may be an absolute path, or a simple file name in the Solr `conf` directory.
+`protected`:: Path of a text file containing a list of protected words, one per line.
+Protected words will not be stemmed.
+Blank lines and lines that begin with "#" are ignored.
+This may be an absolute path, or a simple file name in the Solr `conf` directory.
 
 *Example:*
 
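+As a sketch, a default English stemmer with a protected-words file (the file name `protwords.txt` is illustrative):
+
+[source,xml]
+----
+<analyzer>
+  <tokenizer class="solr.StandardTokenizerFactory"/>
+  <filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
+</analyzer>
+----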
@@ -2222,23 +2336,29 @@ Spanish stemmer, Spanish words:
 
 == Stop Filter
 
-This filter discards, or _stops_ analysis of, tokens that are on the given stop words list. A standard stop words list is included in the Solr `conf` directory, named `stopwords.txt`, which is appropriate for typical English language text.
+This filter discards, or _stops_ analysis of, tokens that are on the given stop words list.
+A standard stop words list is included in the Solr `conf` directory, named `stopwords.txt`, which is appropriate for typical English language text.
 
 *Factory class:* `solr.StopFilterFactory`
 
 *Arguments:*
 
-`words`:: (optional) The path to a file that contains a list of stop words, one per line. Blank lines and lines that begin with "#" are ignored. This may be an absolute path, or path relative to the Solr `conf` directory.
+`words`:: (optional) The path to a file that contains a list of stop words, one per line.
+Blank lines and lines that begin with "#" are ignored.
+This may be an absolute path, or path relative to the Solr `conf` directory.
 
 `format`:: (optional) If the stopwords list has been formatted for Snowball, you can specify `format="snowball"` so Solr can read the stopwords file.
 
-`ignoreCase`:: (true/false, default false) Ignore case when testing for stop words. If true, the stop list should contain lowercase words.
+`ignoreCase`:: (true/false, default false) Ignore case when testing for stop words.
+If true, the stop list should contain lowercase words.
 
-`enablePositionIncrements`:: if `luceneMatchVersion` is `4.4` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens. *This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
+`enablePositionIncrements`:: if `luceneMatchVersion` is `4.4` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens.
+*This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
 
 *Example:*
 
-Case-sensitive matching, capitalized words not stopped. Token positions skip stopped words.
+Case-sensitive matching, capitalized words not stopped.
+Token positions skip stopped words.
 
 [.dynamic-tabs]
 --
@@ -2292,9 +2412,11 @@ Case-sensitive matching, capitalized words not stopped. Token positions skip sto
 
 Like <<Stop Filter>>, this filter discards, or _stops_ analysis of, tokens that are on the given stop words list.
 
-Suggest Stop Filter differs from Stop Filter in that it will not remove the last token unless it is followed by a token separator. For example, a query `"find the"` would preserve the `'the'` since it was not followed by a space, punctuation, etc., and mark it as a `KEYWORD` so that following filters will not change or remove it.
+Suggest Stop Filter differs from Stop Filter in that it will not remove the last token unless it is followed by a token separator.
+For example, a query `"find the"` would preserve the `'the'` since it was not followed by a space, punctuation, etc., and mark it as a `KEYWORD` so that following filters will not change or remove it.
 
-By contrast, a query like "`find the popsicle`" would remove '`the`' as a stopword, since it's followed by a space. When using one of the analyzing suggesters, you would normally use the ordinary `StopFilterFactory` in your index analyzer and then SuggestStopFilter in your query analyzer.
+By contrast, a query like "`find the popsicle`" would remove '`the`' as a stopword, since it's followed by a space.
+When using one of the analyzing suggesters, you would normally use the ordinary `StopFilterFactory` in your index analyzer and then SuggestStopFilter in your query analyzer.
 
 *Factory class:* `solr.SuggestStopFilterFactory`
 
@@ -2302,11 +2424,15 @@ By contrast, a query like "`find the popsicle`" would remove '`the`' as a stopwo
 
 `words`:: (optional; default: {lucene-javadocs}/analysis/common/org/apache/lucene/analysis/core/StopAnalyzer.html[`StopAnalyzer#ENGLISH_STOP_WORDS_SET`] ) The name of a stopwords file to parse.
 
-`format`:: (optional; default: `wordset`) Defines how the words file will be parsed. If `words` is not specified, then `format` must not be specified. The valid values for the format option are:
+`format`:: (optional; default: `wordset`) Defines how the words file will be parsed.
+If `words` is not specified, then `format` must not be specified.
+The valid values for the format option are:
 
-`wordset`:: This is the default format, which supports one word per line (including any intra-word whitespace) and allows whole line comments beginning with the `#` character. Blank lines are ignored.
+`wordset`:: This is the default format, which supports one word per line (including any intra-word whitespace) and allows whole line comments beginning with the `#` character.
+Blank lines are ignored.
 
-`snowball`:: This format allows for multiple words specified on each line, and trailing comments may be specified using the vertical line (`|`). Blank lines are ignored.
+`snowball`:: This format allows for multiple words specified on each line, and trailing comments may be specified using the vertical line (`|`).
+Blank lines are ignored.
 
 `ignoreCase`:: (optional; default: *false*) If *true*, matching is case-insensitive.
 
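+A minimal query-side sketch (the stopwords file name is illustrative; `format` is shown with its default value):
+
+[source,xml]
+----
+<analyzer type="query">
+  <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+  <filter class="solr.LowerCaseFilterFactory"/>
+  <filter class="solr.SuggestStopFilterFactory" ignoreCase="true" words="stopwords.txt" format="wordset"/>
+</analyzer>
+----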
@@ -2350,7 +2476,9 @@ By contrast, a query like "`find the popsicle`" would remove '`the`' as a stopwo
 
 == Synonym Filter
 
-This filter does synonym mapping. Each token is looked up in the list of synonyms and if a match is found, then the synonym is emitted in place of the token. The position value of the new tokens are set such they all occur at the same position as the original token.
+This filter does synonym mapping.
+Each token is looked up in the list of synonyms and if a match is found, then the synonym is emitted in place of the token.
+The position values of the new tokens are set such that they all occur at the same position as the original token.
 
 .Synonym Filter has been Deprecated
 [WARNING]
@@ -2364,9 +2492,11 @@ For arguments and examples, see the Synonym Graph Filter below.
 
 == Synonym Graph Filter
 
-This filter maps single- or multi-token synonyms, producing a fully correct graph output. This filter is a replacement for the Synonym Filter, which produces incorrect graphs for multi-token synonyms.
+This filter maps single- or multi-token synonyms, producing a fully correct graph output.
+This filter is a replacement for the Synonym Filter, which produces incorrect graphs for multi-token synonyms.
 
-If you use this filter during indexing, you must follow it with a Flatten Graph Filter to squash tokens on top of one another like the Synonym Filter, because the indexer can't directly consume a graph. To get fully correct positional queries when your synonym replacements are multiple tokens, you should instead apply synonyms using this filter at query time.
+If you use this filter during indexing, you must follow it with a Flatten Graph Filter to squash tokens on top of one another like the Synonym Filter, because the indexer can't directly consume a graph.
+To get fully correct positional queries when your synonym replacements are multiple tokens, you should instead apply synonyms using this filter at query time.
 
 NOTE: Although this filter produces correct token graphs, it cannot consume an input token graph correctly.
 
@@ -2374,27 +2504,37 @@ NOTE: Although this filter produces correct token graphs, it cannot consume an i
 
 *Arguments:*
 
-`synonyms`:: (required) The path of a file that contains a list of synonyms, one per line. In the (default) `solr` format - see the `format` argument below for alternatives - blank lines and lines that begin with "`#`" are ignored. This may be a comma-separated list of paths.  See <<resource-loading.adoc#,Resource Loading>> for more information.
+`synonyms`:: (required) The path of a file that contains a list of synonyms, one per line.
+In the (default) `solr` format - see the `format` argument below for alternatives - blank lines and lines that begin with "`#`" are ignored.
+This may be a comma-separated list of paths.
+See <<resource-loading.adoc#,Resource Loading>> for more information.
 +
 There are two ways to specify synonym mappings:
 +
-* A comma-separated list of words. If the token matches any of the words, then all the words in the list are substituted, which will include the original token.
+* A comma-separated list of words.
+If the token matches any of the words, then all the words in the list are substituted, which will include the original token.
 +
-* Two comma-separated lists of words with the symbol "\=>" between them. If the token matches any word on the left, then the list on the right is substituted. The original token will not be included unless it is also in the list on the right.
+* Two comma-separated lists of words with the symbol "\=>" between them.
+If the token matches any word on the left, then the list on the right is substituted.
+The original token will not be included unless it is also in the list on the right.
 
 `ignoreCase`:: (optional; default: `false`) If `true`, synonyms will be matched case-insensitively.
 
-`expand`:: (optional; default: `true`) If `true`, a synonym will be expanded to all equivalent synonyms. If `false`, all equivalent synonyms will be reduced to the first in the list.
+`expand`:: (optional; default: `true`) If `true`, a synonym will be expanded to all equivalent synonyms.
+If `false`, all equivalent synonyms will be reduced to the first in the list.
 
-`format`:: (optional; default: `solr`) Controls how the synonyms will be parsed. The short names `solr` (for {lucene-javadocs}/analysis/common/org/apache/lucene/analysis/synonym/SolrSynonymParser.html[`SolrSynonymParser)`] and `wordnet` (for {lucene-javadocs}/analysis/common/org/apache/lucene/analysis/synonym/WordnetSynonymParser.html[`WordnetSynonymParser`] ) are supported, or you may alternatively supply the name of your own {lucene-javadocs}/analysis/common/org/apache/lucene/analysis/ [...]
+`format`:: (optional; default: `solr`) Controls how the synonyms will be parsed.
+The short names `solr` (for {lucene-javadocs}/analysis/common/org/apache/lucene/analysis/synonym/SolrSynonymParser.html[`SolrSynonymParser`]) and `wordnet` (for {lucene-javadocs}/analysis/common/org/apache/lucene/analysis/synonym/WordnetSynonymParser.html[`WordnetSynonymParser`]) are supported, or you may alternatively supply the name of your own {lucene-javadocs}/analysis/common/org/apache/lucene/analysis/synonym/SynonymMap.Builder.html[`SynonymMap.Builder`] subclass.
 
-`tokenizerFactory`:: (optional; default: `WhitespaceTokenizerFactory`) The name of the tokenizer factory to use when parsing the synonyms file. Arguments with the name prefix `tokenizerFactory.*` will be supplied as init params to the specified tokenizer factory.
+`tokenizerFactory`:: (optional; default: `WhitespaceTokenizerFactory`) The name of the tokenizer factory to use when parsing the synonyms file.
+Arguments with the name prefix `tokenizerFactory.*` will be supplied as init params to the specified tokenizer factory.
 +
 Any arguments not consumed by the synonym filter factory, including those without the `tokenizerFactory.*` prefix, will also be supplied as init params to the tokenizer factory.
 +
 If `tokenizerFactory` is specified, then `analyzer` may not be, and vice versa.
 
-`analyzer`:: (optional; default: `WhitespaceTokenizerFactory`) The name of the analyzer class to use when parsing the synonyms file. If `analyzer` is specified, then `tokenizerFactory` may not be, and vice versa.
+`analyzer`:: (optional; default: `WhitespaceTokenizerFactory`) The name of the analyzer class to use when parsing the synonyms file.
+If `analyzer` is specified, then `tokenizerFactory` may not be, and vice versa.
 
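+As an illustrative sketch (using the `mysynonyms.txt` file introduced just below), a query-time analyzer applying this filter might look like:
+
+[source,xml]
+----
+<analyzer type="query">
+  <tokenizer class="solr.StandardTokenizerFactory"/>
+  <filter class="solr.SynonymGraphFilterFactory" synonyms="mysynonyms.txt" ignoreCase="true" expand="true"/>
+</analyzer>
+----
+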
 For the following examples, assume a synonyms file named `mysynonyms.txt`:
 
@@ -2535,13 +2675,15 @@ This filter adds the numeric character offsets of the token as a payload value f
 
 == Trim Filter
 
-This filter trims leading and/or trailing whitespace from tokens. Most tokenizers break tokens at whitespace, so this filter is most often used for special situations.
+This filter trims leading and/or trailing whitespace from tokens.
+Most tokenizers break tokens at whitespace, so this filter is most often used for special situations.
 
 *Factory class:* `solr.TrimFilterFactory`
 
 *Arguments:*
 
-`updateOffsets`:: if `luceneMatchVersion` is `4.3` or earlier and `updateOffsets="true"`, trimmed tokens' start and end offsets will be updated to those of the first and last characters (plus one) remaining in the token. *This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
+`updateOffsets`:: if `luceneMatchVersion` is `4.3` or earlier and `updateOffsets="true"`, trimmed tokens' start and end offsets will be updated to those of the first and last characters (plus one) remaining in the token.
+*This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
 
 *Example:*
 
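+A sketch that pairs this filter with a comma-splitting pattern tokenizer, which leaves surrounding whitespace on the tokens (the tokenizer choice is illustrative):
+
+[source,xml]
+----
+<analyzer>
+  <tokenizer class="solr.PatternTokenizerFactory" pattern=","/>
+  <filter class="solr.TrimFilterFactory"/>
+</analyzer>
+----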
@@ -2691,7 +2833,9 @@ With the example below, for a token "example.com" with type `<URL>`, the token e
 
 == Type Token Filter
 
-This filter blacklists or whitelists a specified list of token types, assuming the tokens have type metadata associated with them. For example, the <<tokenizers.adoc#uax29-url-email-tokenizer,UAX29 URL Email Tokenizer>> emits "<URL>" and "<EMAIL>" typed tokens, as well as other types. This filter would allow you to pull out only e-mail addresses from text as tokens, if you wish.
+This filter blacklists or whitelists a specified list of token types, assuming the tokens have type metadata associated with them.
+For example, the <<tokenizers.adoc#uax29-url-email-tokenizer,UAX29 URL Email Tokenizer>> emits "<URL>" and "<EMAIL>" typed tokens, as well as other types.
+This filter would allow you to pull out only e-mail addresses from text as tokens, if you wish.
 
 *Factory class:* `solr.TypeTokenFilterFactory`
 
@@ -2699,9 +2843,11 @@ This filter blacklists or whitelists a specified list of token types, assuming t
 
 `types`:: Defines the location of a file of types to filter.
 
-`useWhitelist`:: If *true*, the file defined in `types` should be used as include list. If *false*, or undefined, the file defined in `types` is used as a blacklist.
+`useWhitelist`:: If *true*, the file defined in `types` should be used as an include list.
+If *false*, or undefined, the file defined in `types` is used as a blacklist.
 
-`enablePositionIncrements`:: if `luceneMatchVersion` is `4.3` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens. *This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
+`enablePositionIncrements`:: if `luceneMatchVersion` is `4.3` or earlier and `enablePositionIncrements="false"`, no position holes will be left by this filter when it removes tokens.
+*This argument is invalid if `luceneMatchVersion` is `5.0` or later.*
 
 *Example:*
 
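+As a sketch, keeping only `<EMAIL>` tokens emitted by the UAX29 URL Email Tokenizer (the file `email_type.txt`, containing the single line `<EMAIL>`, is a hypothetical example):
+
+[source,xml]
+----
+<analyzer>
+  <tokenizer class="solr.UAX29URLEmailTokenizerFactory"/>
+  <!-- email_type.txt lists the token types to keep, one per line. -->
+  <filter class="solr.TypeTokenFilterFactory" types="email_type.txt" useWhitelist="true"/>
+</analyzer>
+----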
@@ -2747,7 +2893,8 @@ For a full description, including arguments and examples, see the Word Delimiter
 
 This filter splits tokens at word delimiters.
 
-If you use this filter during indexing, you must follow it with a Flatten Graph Filter to squash tokens on top of one another like the Word Delimiter Filter, because the indexer can't directly consume a graph. To get fully correct positional queries when tokens are split, you should instead use this filter at query time.
+If you use this filter during indexing, you must follow it with a Flatten Graph Filter to squash tokens on top of one another like the Word Delimiter Filter, because the indexer can't directly consume a graph.
+To get fully correct positional queries when tokens are split, you should instead use this filter at query time.
 
 Note: although this filter produces correct token graphs, it cannot consume an input token graph correctly.
 
@@ -2767,7 +2914,8 @@ The rules for determining delimiters are determined as follows:
 
 *Arguments:*
 
-`generateWordParts`:: (integer, default 1) If non-zero, splits words at delimiters. For example:"CamelCase", "hot-spot" -> "Camel", "Case", "hot", "spot"
+`generateWordParts`:: (integer, default 1) If non-zero, splits words at delimiters.
+For example:"CamelCase", "hot-spot" -> "Camel", "Case", "hot", "spot"
 
 `generateNumberParts`:: (integer, default 1) If non-zero, splits numeric strings at delimiters: "1947-32" -> "1947", "32"
 
@@ -2787,9 +2935,12 @@ The rules for determining delimiters are determined as follows:
 
 `stemEnglishPossessive`:: (integer, default 1) If 1, strips the possessive `'s` from each subword.
 
-`types`:: (optional) The pathname of a file that contains *character \=> type* mappings, which enable customization of this filter's splitting behavior. Recognized character types: `LOWER`, `UPPER`, `ALPHA`, `DIGIT`, `ALPHANUM`, and `SUBWORD_DELIM`.
+`types`:: (optional) The pathname of a file that contains *character \=> type* mappings, which enable customization of this filter's splitting behavior.
+Recognized character types: `LOWER`, `UPPER`, `ALPHA`, `DIGIT`, `ALPHANUM`, and `SUBWORD_DELIM`.
 +
-The default for any character without a customized mapping is computed from Unicode character properties. Blank lines and comment lines starting with '#' are ignored. An example file:
+The default for any character without a customized mapping is computed from Unicode character properties.
+Blank lines and comment lines starting with '#' are ignored.
+An example file:
 +
 [source,text]
 ----
@@ -2804,7 +2955,8 @@ $ => DIGIT
 
 *Example:*
 
-Default behavior. The whitespace tokenizer is used here to preserve non-alphanumeric characters.
+Default behavior.
+The whitespace tokenizer is used here to preserve non-alphanumeric characters.
 
 [.dynamic-tabs]
 --
@@ -2850,7 +3002,8 @@ Default behavior. The whitespace tokenizer is used here to preserve non-alphanum
 
 *Example:*
 
-Do not split on case changes, and do not generate number parts. Note that by not generating number parts, tokens containing only numeric parts are ultimately discarded.
+Do not split on case changes, and do not generate number parts.
+Note that by not generating number parts, tokens containing only numeric parts are ultimately discarded.
 
 [source,xml]
 ----
@@ -2886,7 +3039,8 @@ Concatenate word parts and number parts, but not word and number parts that occu
 
 *Example:*
 
-Concatenate all. Word and/or number parts are joined together.
+Concatenate all.
+Word and/or number parts are joined together.
 
 [source,xml]
 ----
diff --git a/solr/solr-ref-guide/src/function-queries.adoc b/solr/solr-ref-guide/src/function-queries.adoc
index 3a57378..e0df316 100644
--- a/solr/solr-ref-guide/src/function-queries.adoc
+++ b/solr/solr-ref-guide/src/function-queries.adoc
@@ -20,7 +20,9 @@ Function queries enable you to generate a relevancy score using the actual value
 
 Function queries are supported by the <<dismax-query-parser.adoc#,DisMax>>, <<edismax-query-parser.adoc#,Extended DisMax>>, and <<standard-query-parser.adoc#,standard>> query parsers.
 
-Function queries use _functions_. The functions can be a constant (numeric or string literal), a field, another function or a parameter substitution argument. You can use these functions to modify the ranking of results for users. These could be used to change the ranking of results based on a user's location, or some other calculation.
+Function queries use _functions_.
+The functions can be a constant (numeric or string literal), a field, another function or a parameter substitution argument.
+You can use these functions to modify the ranking of results for users.
+These could be used to change the ranking of results based on a user's location, or some other calculation.
 
 == Using Function Query
 
@@ -34,13 +36,15 @@ There are several ways of using function queries in a Solr query:
 ----
 q={!func}div(popularity,price)&fq={!frange l=1000}customer_ratings
 ----
-* In a Sort expression. For example:
+* In a Sort expression.
+For example:
 +
 [source,text]
 ----
 sort=div(popularity,price) desc, score desc
 ----
-* Add the results of functions as pseudo-fields to documents in query results. For instance, for:
+* Add the results of functions as pseudo-fields to documents in query results.
+For instance, for:
 +
 [source,text]
 ----
@@ -57,13 +61,16 @@ the output would be:
 <float name="score">0.343</float>
 ...
 ----
-* Use in a parameter that is explicitly for specifying functions, such as the eDisMax query parser's <<edismax-query-parser.adoc#extended-dismax-parameters,`boost` parameter>>, or the DisMax query parser's <<dismax-query-parser.adoc#bf-boost-functions-parameter,`bf` (boost function) parameter>>. (Note that the `bf` parameter actually takes a list of function queries separated by white space and each with an optional boost. Make sure you eliminate any internal white space in single functi [...]
+* Use in a parameter that is explicitly for specifying functions, such as the eDisMax query parser's <<edismax-query-parser.adoc#extended-dismax-parameters,`boost` parameter>>, or the DisMax query parser's <<dismax-query-parser.adoc#bf-boost-functions-parameter,`bf` (boost function) parameter>>.
+(Note that the `bf` parameter actually takes a list of function queries separated by white space, each with an optional boost.
+Make sure you eliminate any internal white space in single function queries when using `bf`).
+For example:
 +
 [source,text]
 ----
 q=dismax&bf="ord(popularity)^0.5 recip(rord(price),1,1000,1000)^0.3"
 ----
-* Introduce a function query inline in the Lucene query parser with the `\_val_` keyword. For example:
+* Introduce a function query inline in the Lucene query parser with the `\_val_` keyword.
+For example:
 +
 [source,text]
 ----
@@ -109,7 +116,9 @@ Specifies a floating point constant.
 * `1.5`
 
 === def Function
-`def` is short for default. Returns the value of field "field", or if the field does not exist, returns the default value specified. Yields the first value where `exists()==true`.
+`def` is short for default.
+Returns the value of field "field", or if the field does not exist, returns the default value specified.
+Yields the first value where `exists()==true`.
 
 *Syntax Examples*
 
@@ -117,7 +126,8 @@ Specifies a floating point constant.
 * `def(myfield, 1.0):` equivalent to `if(exists(myfield),myfield,1.0)`
 
 === div Function
-Divides one value or function by another. `div(x,y)` divides `x` by `y`.
+Divides one value or function by another.
+`div(x,y)` divides `x` by `y`.
 
 *Syntax Examples*
 
@@ -125,7 +135,9 @@ Divides one value or function by another. `div(x,y)` divides `x` by `y`.
 * `div(sum(x,100),max(y,1))`
 
 === dist Function
-Returns the distance between two vectors (points) in an n-dimensional space. Takes in the power, plus two or more ValueSource instances and calculates the distances between the two vectors. Each ValueSource must be a number.
+Returns the distance between two vectors (points) in an n-dimensional space.
+Takes in the power, plus two or more ValueSource instances and calculates the distances between the two vectors.
+Each ValueSource must be a number.
 
 There must be an even number of ValueSource instances passed in and the method assumes that the first half represent the first vector and the second half represent the second vector.
 
@@ -137,7 +149,8 @@ There must be an even number of ValueSource instances passed in and the method a
 * `dist(1,x,y,z,e,f,g)`: Manhattan distance between (x,y,z) and (e,f,g) where each letter is a field name.
 
 === docfreq(field,val) Function
-Returns the number of documents that contain the term in the field. This is a constant (the same value for all documents in the index).
+Returns the number of documents that contain the term in the field.
+This is a constant (the same value for all documents in the index).
 
 You can quote the term if it's more complex, or do parameter substitution for the term value.
 
@@ -147,7 +160,8 @@ You can quote the term if it's more complex, or do parameter substitution for th
 * `...&defType=func` `&q=docfreq(text,$myterm)&myterm=solr`
 
 === field Function
-Returns the numeric docValues or indexed value of the field with the specified name. In its simplest (single argument) form, this function can only be used on single valued fields, and can be called using the name of the field as a string, or for most conventional field names simply use the field name by itself without using the `field(...)` syntax.
+Returns the numeric docValues or indexed value of the field with the specified name.
+In its simplest (single argument) form, this function can only be used on single valued fields, and can be called using the name of the field as a string, or for most conventional field names simply use the field name by itself without using the `field(...)` syntax.
 
 When using docValues, an optional 2nd argument can be specified to select the `min` or `max` value of multivalued fields.
 
@@ -170,21 +184,26 @@ For multivalued docValues fields:
 * `field(myMultiValuedFloatField,max)`
 
 === hsin Function
-The Haversine distance calculates the distance between two points on a sphere when traveling along the sphere. The values must be in radians. `hsin` also take a Boolean argument to specify whether the function should convert its output to radians.
+The Haversine distance calculates the distance between two points on a sphere when traveling along the sphere.
+The values must be in radians.
+`hsin` also takes a Boolean argument to specify whether the function should convert its output to radians.
 
 *Syntax Example*
 
 * `hsin(2, true, x, y, 0, 0)`
 
 === idf Function
-Inverse document frequency; a measure of whether the term is common or rare across all documents. Obtained by dividing the total number of documents by the number of documents containing the term, and then taking the logarithm of that quotient. See also `tf`.
+Inverse document frequency; a measure of whether the term is common or rare across all documents.
+Obtained by dividing the total number of documents by the number of documents containing the term, and then taking the logarithm of that quotient.
+See also `tf`.
 
 *Syntax Example*
 
 * `idf(fieldName,'solr')`: measures the inverse of the frequency of the occurrence of the term `'solr'` in `fieldName`.
 
 === if Function
-Enables conditional function queries. In `if(test,value1,value2)`:
+Enables conditional function queries.
+In `if(test,value1,value2)`:
 
 * `test` is or refers to a logical value or expression that returns a logical value (TRUE or FALSE).
 * `value1` is the value that is returned by the function if `test` yields TRUE.
@@ -194,10 +213,12 @@ An expression can be any function which outputs boolean values, or even function
 
 *Syntax Example*
 
-* `if(termfreq (cat,'electronics'),popularity,42)`: This function checks each document for to see if it contains the term "electronics" in the `cat` field. If it does, then the value of the `popularity` field is returned, otherwise the value of `42` is returned.
+* `if(termfreq (cat,'electronics'),popularity,42)`: This function checks each document to see if it contains the term "electronics" in the `cat` field.
+If it does, then the value of the `popularity` field is returned, otherwise the value of `42` is returned.
 
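+As an illustrative request-level sketch (reusing the field names from the example above), the same conditional can be returned as a pseudo-field:
+
+[source,text]
+----
+q=*:*&fl=id,pop_or_default:if(termfreq(cat,'electronics'),popularity,42)
+----
+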
 === linear Function
-Implements `m*x+c` where `m` and `c` are constants and `x` is an arbitrary function. This is equivalent to `sum(product(m,x),c)`, but slightly more efficient as it is implemented as a single function.
+Implements `m*x+c` where `m` and `c` are constants and `x` is an arbitrary function.
+This is equivalent to `sum(product(m,x),c)`, but slightly more efficient as it is implemented as a single function.
 
 *Syntax Examples*
 
@@ -213,20 +234,24 @@ Returns the log base 10 of the specified function.
 * `log(sum(x,100))`
 
 === map Function
-Maps any values of an input function `x` that fall within `min` and `max` inclusive to the specified `target`. The arguments `min` and `max` must be constants. The arguments `target` and `default` can be constants or functions.
+Maps any values of an input function `x` that fall within `min` and `max` inclusive to the specified `target`.
+The arguments `min` and `max` must be constants.
+The arguments `target` and `default` can be constants or functions.
 
 If the value of `x` does not fall between `min` and `max`, then either the value of `x` is returned, or a default value is returned if specified as a 5th argument.
 
 *Syntax Examples*
 
 * `map(x,min,max,target)`
-** `map(x,0,0,1)`: Changes any values of 0 to 1. This can be useful in handling default 0 values.
+** `map(x,0,0,1)`: Changes any values of 0 to 1.
+This can be useful in handling default 0 values.
 * `map(x,min,max,target,default)`
 ** `map(x,0,100,1,-1)`: Changes any values between `0` and `100` to `1`, and all other values to `-1`.
 ** `map(x,0,100,sum(x,599),docfreq(text,solr))`: Changes any values between `0` and `100` to x+599, and all other values to frequency of the term 'solr' in the field text.
 
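+For instance, as a pseudo-field in a request (the `popularity` field is illustrative):
+
+[source,text]
+----
+q=*:*&fl=id,popularity,normalized_pop:map(popularity,0,0,1)
+----
+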
 === max Function
-Returns the maximum numeric value of multiple nested functions or constants, which are specified as arguments: `max(x,y,...)`. The `max` function can also be useful for "bottoming out" another function or field at some specified constant.
+Returns the maximum numeric value of multiple nested functions or constants, which are specified as arguments: `max(x,y,...)`.
+The `max` function can also be useful for "bottoming out" another function or field at some specified constant.
 
 Use the `field(myfield,max)` syntax for <<field Function,selecting the maximum value of a single multivalued field>>.
 
@@ -235,14 +260,16 @@ Use the `field(myfield,max)` syntax for <<field Function,selecting the maximum v
 * `max(myfield,myotherfield,0)`
 
 === maxdoc Function
-Returns the number of documents in the index, including those that are marked as deleted but have not yet been purged. This is a constant (the same value for all documents in the index).
+Returns the number of documents in the index, including those that are marked as deleted but have not yet been purged.
+This is a constant (the same value for all documents in the index).
 
 *Syntax Example*
 
 * `maxdoc()`
 
 === min Function
-Returns the minimum numeric value of multiple nested functions of constants, which are specified as arguments: `min(x,y,...)`. The `min` function can also be useful for providing an "upper bound" on a function using a constant.
+Returns the minimum numeric value of multiple nested functions or constants, which are specified as arguments: `min(x,y,...)`.
+The `min` function can also be useful for providing an "upper bound" on a function using a constant.
 
 Use the `field(myfield,min)` <<field Function,syntax for selecting the minimum value of a single multivalued field>>.
 
@@ -251,7 +278,8 @@ Use the `field(myfield,min)` <<field Function,syntax for selecting the minimum v
 * `min(myfield,myotherfield,0)`
 
 === ms Function
-Returns milliseconds of difference between its arguments. Dates are relative to the Unix or POSIX time epoch, midnight, January 1, 1970 UTC.
+Returns milliseconds of difference between its arguments.
+Dates are relative to the Unix or POSIX time epoch, midnight, January 1, 1970 UTC.
 
 Arguments may be the name of a `DatePointField`, `TrieDateField`, or date math based on a <<date-formatting-math.adoc#,constant date or `NOW`>>.
 
@@ -269,14 +297,16 @@ Arguments may be the name of a `DatePointField`, `TrieDateField`, or date math b
 * `ms(datefield1, datefield2)`
 
 === norm(_field_) Function
-Returns the "norm" stored in the index for the specified field. This is the product of the index time boost and the length normalization factor, according to the {lucene-javadocs}/core/org/apache/lucene/search/similarities/Similarity.html[Similarity] for the field.
+Returns the "norm" stored in the index for the specified field.
+This is the product of the index time boost and the length normalization factor, according to the {lucene-javadocs}/core/org/apache/lucene/search/similarities/Similarity.html[Similarity] for the field.
 
 *Syntax Example*
 
 * `norm(fieldName)`
 
 === numdocs Function
-Returns the number of documents in the index, not including those that are marked as deleted but have not yet been purged. This is a constant (the same value for all documents in the index).
+Returns the number of documents in the index, not including those that are marked as deleted but have not yet been purged.
+This is a constant (the same value for all documents in the index).
 
 *Syntax Example*
 
@@ -285,7 +315,9 @@ Returns the number of documents in the index, not including those that are marke
 === ord Function
 Returns the ordinal of the indexed field value within the indexed list of terms for that field in Lucene index order (lexicographically ordered by unicode value), starting at 1.
 
-In other words, for a given field, all values are ordered lexicographically; this function then returns the offset of a particular value in that ordering. The field must have a maximum of one value per document (not multi-valued). `0` is returned for documents without a value in the field.
+In other words, for a given field, all values are ordered lexicographically; this function then returns the offset of a particular value in that ordering.
+The field must have a maximum of one value per document (not multi-valued).
+`0` is returned for documents without a value in the field.
 
 IMPORTANT: `ord()` depends on the position in an index and can change when other documents are inserted or deleted.
 
@@ -300,12 +332,15 @@ See also `rord` below.
 === payload Function
 Returns the float value computed from the decoded payloads of the term specified.
 
-The return value is computed using the `min`, `max`, or `average` of the decoded payloads. A special `first` function can be used instead of the others, to short-circuit term enumeration and return only the decoded payload of the first term.
+The return value is computed using the `min`, `max`, or `average` of the decoded payloads.
+A special `first` function can be used instead of the others, to short-circuit term enumeration and return only the decoded payload of the first term.
 
-The field specified must have float or integer payload encoding capability (via `DelimitedPayloadTokenFilter` or `NumericPayloadTokenFilter`). If no payload is found for the term, the default value is returned.
+The field specified must have float or integer payload encoding capability (via `DelimitedPayloadTokenFilter` or `NumericPayloadTokenFilter`).
+If no payload is found for the term, the default value is returned.
 
 * `payload(field_name,term)`: default value is 0.0, `average` function is used.
-* `payload(field_name,term,default_value)`: default value can be a constant, field name, or another float returning function. `average` function used.
+* `payload(field_name,term,default_value)`: default value can be a constant, field name, or another float returning function.
+The `average` function is used.
 * `payload(field_name,term,default_value,function)`: function values can be `min`, `max`, `average`, or `first`.
 
 *Syntax Example*
@@ -314,7 +349,8 @@ The field specified must have float or integer payload encoding capability (via
 
 === pow Function
 
-Raises the specified base to the specified power. `pow(x,y)` raises `x` to the power of `y`.
+Raises the specified base to the specified power.
+`pow(x,y)` raises `x` to the power of `y`.
 
 *Syntax Examples*
 
@@ -323,7 +359,8 @@ Raises the specified base to the specified power. `pow(x,y)` raises `x` to the p
 * `pow(x,0.5):` the same as `sqrt`
 
 === product Function
-Returns the product of multiple values or functions, which are specified in a comma-separated list. `mul(...)` may also be used as an alias for this function.
+Returns the product of multiple values or functions, which are specified in a comma-separated list.
+`mul(...)` may also be used as an alias for this function.
 
 *Syntax Examples*
 
@@ -332,7 +369,8 @@ Returns the product of multiple values or functions, which are specified in a co
 * `mul(x,y)`
 
 === query Function
-Returns the score for the given subquery, or the default value for documents not matching the query. Any type of subquery is supported through either parameter de-referencing `$otherparam` or direct specification of the query string in the <<local-params.adoc#,local params>> through the `v` key.
+Returns the score for the given subquery, or the default value for documents not matching the query.
+Any type of subquery is supported through either parameter de-referencing `$otherparam` or direct specification of the query string in the <<local-params.adoc#,local params>> through the `v` key.
 
 *Syntax Examples*
 
@@ -344,7 +382,9 @@ Returns the score for the given subquery, or the default value for documents not
 === recip Function
 Performs a reciprocal function with `recip(x,m,a,b)` implementing `a/(m*x+b)` where `m,a,b` are constants, and `x` is any arbitrarily complex function.
 
-When `a` and `b` are equal, and `x>=0`, this function has a maximum value of `1` that drops as `x` increases. Increasing the value of `a` and `b` together results in a movement of the entire function to a flatter part of the curve. These properties can make this an ideal function for boosting more recent documents when x is `rord(datefield)`.
+When `a` and `b` are equal, and `x>=0`, this function has a maximum value of `1` that drops as `x` increases.
+Increasing the value of `a` and `b` together results in a movement of the entire function to a flatter part of the curve.
+These properties can make this an ideal function for boosting more recent documents when x is `rord(datefield)`.
 
 *Syntax Examples*
 
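+* `recip(rord(creationDate),1,1000,1000)`: an illustrative recency boost (the `creationDate` field name is hypothetical); more recent documents have a lower `rord` value and therefore a score closer to 1.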
@@ -359,9 +399,13 @@ Returns the reverse ordering of that returned by `ord`.
 * `rord(myDateField)`
 
 === scale Function
-Scales values of the function `x` such that they fall between the specified `minTarget` and `maxTarget` inclusive. The current implementation traverses all of the function values to obtain the min and max, so it can pick the correct scale.
+Scales values of the function `x` such that they fall between the specified `minTarget` and `maxTarget` inclusive.
+The current implementation traverses all of the function values to obtain the min and max, so it can pick the correct scale.
 
-The current implementation cannot distinguish when documents have been deleted or documents that have no value. It uses `0.0` values for these cases. This means that if values are normally all greater than `0.0`, one can still end up with `0.0` as the `min` value to map from. In these cases, an appropriate `map()` function could be used as a workaround to change `0.0` to a value in the real range, as shown here: `scale(map(x,0,0,5),1,2)`
+The current implementation cannot distinguish when documents have been deleted or documents that have no value.
+It uses `0.0` values for these cases.
+This means that if values are normally all greater than `0.0`, one can still end up with `0.0` as the `min` value to map from.
+In these cases, an appropriate `map()` function could be used as a workaround to change `0.0` to a value in the real range, as shown here: `scale(map(x,0,0,5),1,2)`
 
 *Syntax Examples*
 
@@ -369,7 +413,9 @@ The current implementation cannot distinguish when documents have been deleted o
 * `scale(x,1,2)`: scales the values of x such that all values will be between 1 and 2 inclusive.
 
 === sqedist Function
-The Square Euclidean distance calculates the 2-norm (Euclidean distance) but does not take the square root, thus saving a fairly expensive operation. It is often the case that applications that care about Euclidean distance do not need the actual distance, but instead can use the square of the distance. There must be an even number of ValueSource instances passed in and the method assumes that the first half represent the first vector and the second half represent the second vector.
+The Square Euclidean distance calculates the 2-norm (Euclidean distance) but does not take the square root, thus saving a fairly expensive operation.
+It is often the case that applications that care about Euclidean distance do not need the actual distance, but instead can use the square of the distance.
+There must be an even number of ValueSource instances passed in and the method assumes that the first half represent the first vector and the second half represent the second vector.
 
 *Syntax Example*
 
@@ -385,14 +431,18 @@ Returns the square root of the specified value or function.
 * `sqrt(sum(x,100))`
 
 === strdist Function
-Calculate the distance between two strings. Uses the Lucene spell checker `StringDistance` interface and supports all of the implementations available in that package, plus allows applications to plug in their own via Solr's resource loading capabilities. `strdist` takes (string1, string2, distance measure).
+Calculates the distance between two strings.
+Uses the Lucene spell checker `StringDistance` interface and supports all of the implementations available in that package, plus allows applications to plug in their own via Solr's resource loading capabilities.
+`strdist` takes (string1, string2, distance measure).
 
 Possible values for distance measure are:
 
 * jw: Jaro-Winkler
 * edit: Levenshtein or Edit distance
-* ngram: The NGramDistance, if specified, can optionally pass in the ngram size too. Default is 2.
-* FQN: Fully Qualified class Name for an implementation of the StringDistance interface. Must have a no-arg constructor.
+* ngram: The NGramDistance; if specified, the ngram size can optionally be passed in as well.
+Default is 2.
+* FQN: Fully Qualified class Name for an implementation of the StringDistance interface.
+Must have a no-arg constructor.
 
 *Syntax Example*
 
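+* `strdist("SOLR",id,edit)`: an illustrative use that computes the edit (Levenshtein) distance between the literal string "SOLR" and the value of each document's `id` field.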
@@ -407,7 +457,8 @@ Returns `x-y` from `sub(x,y)`.
 * `sub(100, sqrt(myfield))`
 
 === sum Function
-Returns the sum of multiple values or functions, which are specified in a comma-separated list. `add(...)` may be used as an alias for this function.
+Returns the sum of multiple values or functions, which are specified in a comma-separated list.
+`add(...)` may be used as an alias for this function.
 
 *Syntax Examples*
 
@@ -417,7 +468,8 @@ Returns the sum of multiple values or functions, which are specified in a comma-
 * `add(x,y)`
 
 === sumtotaltermfreq Function
-Returns the sum of `totaltermfreq` values for all terms in the field in the entire index (i.e., the number of indexed tokens for that field). (Aliases `sumtotaltermfreq` to `sttf`.)
+Returns the sum of `totaltermfreq` values for all terms in the field in the entire index (i.e., the number of indexed tokens for that field).
+(Aliases `sumtotaltermfreq` to `sttf`.)
 
 *Syntax Example*
 If doc1:(fieldX:A B C) and doc2:(fieldX:A A A A):
@@ -435,26 +487,32 @@ Returns the number of times the term appears in the field for that document.
 * `termfreq(text,'memory')`
 
 === tf Function
-Term frequency; returns the term frequency factor for the given term, using the {lucene-javadocs}/core/org/apache/lucene/search/similarities/Similarity.html[Similarity] for the field. The `tf-idf` value increases proportionally to the number of times a word appears in the document, but is offset by the frequency of the word in the document, which helps to control for the fact that some words are generally more common than others. See also `idf`.
+Term frequency; returns the term frequency factor for the given term, using the {lucene-javadocs}/core/org/apache/lucene/search/similarities/Similarity.html[Similarity] for the field.
+The `tf-idf` value increases proportionally to the number of times a word appears in the document, but is offset by the frequency of the word in the index, which helps to control for the fact that some words are generally more common than others.
+See also `idf`.
 
 *Syntax Examples*
 
 * `tf(text,'solr')`
 
 === top Function
-Causes the function query argument to derive its values from the top-level IndexReader containing all parts of an index. For example, the ordinal of a value in a single segment will be different from the ordinal of that same value in the complete index.
+Causes the function query argument to derive its values from the top-level IndexReader containing all parts of an index.
+For example, the ordinal of a value in a single segment will be different from the ordinal of that same value in the complete index.
 
 The `ord()` and `rord()` functions implicitly use `top()`, and hence `ord(foo)` is equivalent to `top(ord(foo))`.
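+
+*Syntax Example*
+
+A brief sketch, where `category` is a hypothetical string field:
+
+* `top(ord(category))`: computes the ordinal of `category` against the complete index rather than a single segment.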
 
 === totaltermfreq Function
-Returns the number of times the term appears in the field in the entire index. (Aliases `totaltermfreq` to `ttf`.)
+Returns the number of times the term appears in the field in the entire index.
+(Aliases `totaltermfreq` to `ttf`.)
 
 *Syntax Example*
 
 * `ttf(text,'memory')`
 
 == Boolean Functions
-The following functions are boolean – they return true or false. They are mostly useful as the first argument of the `if` function, and some of these can be combined. If used somewhere else, it will yield a '1' or '0'.
+The following functions are boolean – they return true or false.
+They are mostly useful as the first argument of the `if` function, and some of these can be combined.
+If used somewhere else, they will yield a '1' or '0'.
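+
+For instance, a brief sketch combining these ideas (`popularity` and `price` are hypothetical numeric fields):
+
+* `if(and(exists(popularity),exists(price)),product(popularity,price),0)`: evaluates to the product of the two field values when both exist for a document, and to 0 otherwise.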
 
 === and Function
 Returns a value of true if and only if all of its operands evaluate to true.
@@ -504,17 +562,21 @@ Returns `true` if any member of the field exists.
 
 == Example Function Queries
 
-To give you a better understanding of how function queries can be used in Solr, suppose an index stores the dimensions in meters x,y,z of some hypothetical boxes with arbitrary names stored in field `boxname`. Suppose we want to search for box matching name `findbox` but ranked according to volumes of boxes. The query parameters would be:
+To give you a better understanding of how function queries can be used in Solr, suppose an index stores the dimensions in meters `x`, `y`, and `z` of some hypothetical boxes, with arbitrary names stored in the field `boxname`.
+Suppose we want to search for a box matching the name `findbox`, but rank the results according to the volumes of the boxes.
+The query parameters would be:
 
 [source,text]
 q=boxname:findbox _val_:"product(x,y,z)"
 
-This query will rank the results based on volumes. In order to get the computed volume, you will need to request the `score`, which will contain the resultant volume:
+This query will rank the results based on volumes.
+In order to get the computed volume, you will need to request the `score`, which will contain the resultant volume:
 
 [source,text]
 &fl=*, score
 
-Suppose that you also have a field storing the weight of the box as `weight`. To sort by the density of the box and return the value of the density in score, you would submit the following query:
+Suppose that you also have a field storing the weight of the box as `weight`.
+To sort by the density of the box and return the value of the density in score, you would submit the following query:
 
 [source,text]
 ----
@@ -523,14 +585,16 @@ http://localhost:8983/solr/collection_name/select?q=boxname:findbox _val_:"div(w
 
 == Sort By Function
 
-You can sort your query results by the output of a function. For example, to sort results by distance, you could enter:
+You can sort your query results by the output of a function.
+For example, to sort results by distance, you could enter:
 
 [source,text]
 ----
 http://localhost:8983/solr/collection_name/select?q=*:*&sort=dist(2, point1, point2) desc
 ----
 
-Sort by function also supports pseudo-fields: fields can be generated dynamically and return results as though it was normal field in the index. For example,
+Sort by function also supports pseudo-fields: fields can be generated dynamically and returned in the results as though they were normal fields in the index.
+For example,
 
 `&fl=id,sum(x, y),score&wt=xml`
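+
+Putting both ideas together, a sketch of a complete request (where `x` and `y` are hypothetical numeric fields) might look like this:
+
+[source,text]
+----
+http://localhost:8983/solr/collection_name/select?q=*:*&fl=id,sum(x,y),score&sort=sum(x,y) desc
+----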
 
diff --git a/solr/solr-ref-guide/src/graph-traversal.adoc b/solr/solr-ref-guide/src/graph-traversal.adoc
index 23b9737..ce210f9 100644
--- a/solr/solr-ref-guide/src/graph-traversal.adoc
+++ b/solr/solr-ref-guide/src/graph-traversal.adoc
@@ -18,20 +18,25 @@
 
 Graph traversal with streaming expressions uses the `nodes` function to perform a breadth-first graph traversal.
 
-The `nodes` function can be combined with the `scoreNodes` function to provide recommendations. `nodes` can also be combined with the wider streaming expression library to perform complex operations on gathered node sets.
+The `nodes` function can be combined with the `scoreNodes` function to provide recommendations.
+`nodes` can also be combined with the wider streaming expression library to perform complex operations on gathered node sets.
 
 `nodes` traversals are distributed within a SolrCloud collection and can span collections.
 
-`nodes` is designed for use cases that involve zooming into a neighborhood in the graph and performing precise traversals to gather node sets and aggregations. In these types of use cases `nodes` will often provide sub-second performance. Some sample use cases are provided later in the document.
+`nodes` is designed for use cases that involve zooming into a neighborhood in the graph and performing precise traversals to gather node sets and aggregations.
+In these types of use cases `nodes` will often provide sub-second performance.
+Some sample use cases are provided later in the document.
 
 [IMPORTANT]
 ====
-This document assumes a basic understanding of graph terminology and streaming expressions. You can begin exploring graph traversal concepts with this https://en.wikipedia.org/wiki/Graph_traversal[Wikipedia article]. More details about streaming expressions are available in this Guide, in the section <<streaming-expressions.adoc#,Streaming Expressions>>.
+This document assumes a basic understanding of graph terminology and streaming expressions.
+You can begin exploring graph traversal concepts with this https://en.wikipedia.org/wiki/Graph_traversal[Wikipedia article].
+More details about streaming expressions are available in this Guide, in the section <<streaming-expressions.adoc#,Streaming Expressions>>.
 ====
 
 == Basic Syntax
 
-We'll start with the most basic syntax and slowly build up more complexity. The most basic syntax for `nodes` is:
+We'll start with the most basic syntax and slowly build up more complexity.
+The most basic syntax for `nodes` is:
 
 [source,plain]
 ----
@@ -42,9 +47,12 @@ nodes(emails,
 
 Let's break down this simple expression.
 
-The first parameter, `emails`, is the collection being traversed. The second parameter, `walk`, maps a hard-coded node ID ("\johndoe@apache.org") to a field in the index (`from`). This will return all the *edges* in the index that have `johndoe@apache.org` in the `from` field.
+The first parameter, `emails`, is the collection being traversed.
+The second parameter, `walk`, maps a hard-coded node ID ("\johndoe@apache.org") to a field in the index (`from`).
+This will return all the *edges* in the index that have `johndoe@apache.org` in the `from` field.
 
-The `gather` parameter tells the function to gather the values in the `to `field. The values that are gathered are the node IDs emitted by the function.
+The `gather` parameter tells the function to gather the values in the `to` field.
+The values that are gathered are the node IDs emitted by the function.
 
 In the example above the nodes emitted will be all of the people that "johndoe@apache.org" has emailed.
 
@@ -59,7 +67,8 @@ nodes(emails,
 
 The `nodes` function above finds all the edges with "johndoe@apache.org" or "janesmith@apache.org" in the `from` field and gathers the `to` field.
 
-Like all <<streaming-expressions.adoc#,Streaming Expressions>>, you can execute a `nodes` expression by sending it to the `/stream` handler. For example:
+Like all <<streaming-expressions.adoc#,Streaming Expressions>>, you can execute a `nodes` expression by sending it to the `/stream` handler.
+For example:
 
 [source,bash]
 ----
@@ -102,9 +111,13 @@ The output of this expression would look like this:
 }
 ----
 
-All of the tuples returned have the `node` field. The `node` field contains the node IDs gathered by the function. The `collection`, `field`, and `level` of the traversal are also included in the output.
+All of the tuples returned have the `node` field.
+The `node` field contains the node IDs gathered by the function.
+The `collection`, `field`, and `level` of the traversal are also included in the output.
 
-Notice that the level is "1" for each tuple in the example. The root nodes are level 0 (in the example above, the root nodes are "johndoe@apache.org, janesmith@apache.org") By default the `nodes` function emits only the _*leaf nodes*_ of the traversal, which is the outer-most node set. To emit the root nodes you can specify the `scatter` parameter:
+Notice that the level is "1" for each tuple in the example.
+The root nodes are level 0 (in the example above, the root nodes are "johndoe@apache.org" and "janesmith@apache.org").
+By default the `nodes` function emits only the _*leaf nodes*_ of the traversal, which is the outer-most node set.
+To emit the root nodes you can specify the `scatter` parameter:
 
 [source,plain]
 ----
@@ -160,7 +173,8 @@ Now the level 0 root node is included in the output.
 
 == Aggregations
 
-`nodes` also supports aggregations. For example:
+`nodes` also supports aggregations.
+For example:
 
 [source,plain]
 ----
@@ -170,17 +184,25 @@ nodes(emails,
       count(*))
 ----
 
-The expression above finds the edges with "\johndoe@apache.org" or "\janesmith@apache.org" in the `from` field and gathers the values from the `to` field. It also aggregates the count for each node ID gathered.
+The expression above finds the edges with "\johndoe@apache.org" or "\janesmith@apache.org" in the `from` field and gathers the values from the `to` field.
+It also aggregates the count for each node ID gathered.
 
-A gathered node could have a count of 2 if both "\johndoe@apache.org" and "\janesmith@apache.org" have emailed the same person. Node sets contain a unique set of nodes, so the same person won't appear twice in the node set, but the count will reflect that it appeared twice during the traversal.
+A gathered node could have a count of 2 if both "\johndoe@apache.org" and "\janesmith@apache.org" have emailed the same person.
+Node sets contain a unique set of nodes, so the same person won't appear twice in the node set, but the count will reflect that it appeared twice during the traversal.
 
-Edges are uniqued as part of the traversal so the count will *not* reflect the number of times "\johndoe@apache.org" emailed the same person. For example, personA might have emailed personB 100 times. These edges would get uniqued and only be counted once. But if person personC also emailed personB this would increment the count for personB.
+Edges are uniqued as part of the traversal so the count will *not* reflect the number of times "\johndoe@apache.org" emailed the same person.
+For example, personA might have emailed personB 100 times.
+These edges would get uniqued and only be counted once.
+But if personC also emailed personB, this would increment the count for personB.
 
-The aggregation functions supported are `count(*)`, `sum(field)`, `min(field)`, `max(field)`, and `avg(field)`. The fields being aggregated should be present in the edges collected during the traversal. Later examples (below) will show aggregations can be a powerful tool for providing recommendations and limiting the scope of traversals.
+The aggregation functions supported are `count(*)`, `sum(field)`, `min(field)`, `max(field)`, and `avg(field)`.
+The fields being aggregated should be present in the edges collected during the traversal.
+Later examples (below) will show how aggregations can be a powerful tool for providing recommendations and limiting the scope of traversals.
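+
+For example, a sketch that adds a second aggregation over a hypothetical numeric field named `size_d` stored on the email records:
+
+[source,plain]
+----
+nodes(emails,
+      walk="johndoe@apache.org->from",
+      gather="to",
+      count(*),
+      avg(size_d))
+----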
 
 == Nesting nodes Functions
 
-The `nodes` function can be nested to traverse deeper into the graph. For example:
+The `nodes` function can be nested to traverse deeper into the graph.
+For example:
 
 [source,plain]
 ----
@@ -194,23 +216,36 @@ nodes(emails,
 
 In the example above the outer `nodes` function operates on the node set collected from the inner `nodes` function.
 
-Notice that the inner `nodes` function behaves exactly as the examples already discussed. But the `walk` parameter of the outer `nodes` function behaves differently.
+Notice that the inner `nodes` function behaves exactly as the examples already discussed.
+But the `walk` parameter of the outer `nodes` function behaves differently.
 
-In the outer `nodes` function the `walk` parameter works with tuples coming from an internal streaming expression. In this scenario the `walk` parameter maps the `node` field to the `from` field. Remember that the node IDs collected from the inner `nodes` expression are placed in the `node` field.
+In the outer `nodes` function the `walk` parameter works with tuples coming from an internal streaming expression.
+In this scenario the `walk` parameter maps the `node` field to the `from` field.
+Remember that the node IDs collected from the inner `nodes` expression are placed in the `node` field.
 
-Put more simply, the inner expression gathers all the people that "\johndoe@apache.org" has emailed. We can call this group the "friends of \johndoe@apache.org". The outer expression gathers all the people that the "friends of \johndoe@apache.org" have emailed. This is a basic friends-of-friends traversal.
+Put more simply, the inner expression gathers all the people that "\johndoe@apache.org" has emailed.
+We can call this group the "friends of \johndoe@apache.org".
+The outer expression gathers all the people that the "friends of \johndoe@apache.org" have emailed.
+This is a basic friends-of-friends traversal.
 
 This construct of nesting `nodes` functions is the basic technique for doing a controlled traversal through the graph.
 
 == Cycle Detection
 
-The `nodes` function performs cycle detection across the entire traversal. This ensures that nodes that have already been visited are not traversed again. Cycle detection is important for both limiting the size of traversals and gathering accurate aggregations. Without cycle detection the size of the traversal could grow exponentially with each hop in the traversal. With cycle detection only new nodes encountered are traversed.
+The `nodes` function performs cycle detection across the entire traversal.
+This ensures that nodes that have already been visited are not traversed again.
+Cycle detection is important for both limiting the size of traversals and gathering accurate aggregations.
+Without cycle detection the size of the traversal could grow exponentially with each hop in the traversal.
+With cycle detection only new nodes encountered are traversed.
 
-Cycle detection *does not* cross collection boundaries. This is because internally the collection name is part of the node ID. For example the node ID "\johndoe@apache.org", is really `emails/johndoe@apache.org`. When traversing to another collection "\johndoe@apache.org" will be traversed.
+Cycle detection *does not* cross collection boundaries.
+This is because internally the collection name is part of the node ID.
+For example, the node ID "\johndoe@apache.org" is really `emails/johndoe@apache.org`.
+When traversing to another collection, "\johndoe@apache.org" will be traversed again.
 
 == Filtering the Traversal
 
-Each level in the traversal can be filtered with a filter query. For example:
+Each level in the traversal can be filtered with a filter query.
+For example:
 
 [source,plain]
 ----
@@ -220,11 +255,14 @@ nodes(emails,
       gather="to")
 ----
 
-In the example above only emails that match the filter query will be included in the traversal. Any Solr query can be included here. So you can do fun things like <<spatial-search.adoc#,geospatial queries>>, apply any of the available <<query-syntax-and-parsers.adoc#,query parsers>>, or even write custom query parsers to limit the traversal.
+In the example above only emails that match the filter query will be included in the traversal.
+Any Solr query can be included here.
+So you can do fun things like <<spatial-search.adoc#,geospatial queries>>, apply any of the available <<query-syntax-and-parsers.adoc#,query parsers>>, or even write custom query parsers to limit the traversal.
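+
+As another sketch, the filter could just as easily restrict the walk by time, assuming a hypothetical `date` field on the email records:
+
+[source,plain]
+----
+nodes(emails,
+      walk="johndoe@apache.org->from",
+      fq="date:[NOW-7DAYS TO NOW]",
+      gather="to")
+----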
 
 == Root Streams
 
-Any streaming expression can be used to provide the root nodes for a traversal. For example:
+Any streaming expression can be used to provide the root nodes for a traversal.
+For example:
 
 [source,plain]
 ----
@@ -234,26 +272,37 @@ nodes(emails,
       gather="to")
 ----
 
-The example above provides the root nodes through a search expression. You can also provide arbitrarily complex, nested streaming expressions with joins, etc., to specify the root nodes.
+The example above provides the root nodes through a search expression.
+You can also provide arbitrarily complex, nested streaming expressions with joins, etc., to specify the root nodes.
 
-Notice that the `walk` parameter maps a field from the tuples generated by the inner stream. In this case it maps the `to` field from the inner stream to the `from` field.
+Notice that the `walk` parameter maps a field from the tuples generated by the inner stream.
+In this case it maps the `to` field from the inner stream to the `from` field.
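+
+As a further sketch (the query and field values here are only illustrative), the senders of matching emails could just as easily seed the traversal, with the inner stream's `from` field mapped to the `from` field of the walk:
+
+[source,plain]
+----
+nodes(emails,
+      search(emails, q="body:(solr rocks)", fl="from", sort="score desc", rows="20"),
+      walk="from->from",
+      gather="to")
+----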
 
 == Skipping High Frequency Nodes
 
-It's often desirable to skip traversing high frequency nodes in the graph. This is similar in nature to a search term stop list. The best way to describe this is through an example use case.
+It's often desirable to skip traversing high frequency nodes in the graph.
+This is similar in nature to a search term stop list.
+The best way to describe this is through an example use case.
 
-Let's say that you want to recommend content for a user based on a collaborative filter. Below is one approach for a simple collaborative filter:
+Let's say that you want to recommend content for a user based on a collaborative filter.
+Below is one approach for a simple collaborative filter:
 
 . Find all content userA has read.
-. Find users whose reading list is closest to userA. These are users with similar tastes as userA.
+. Find users whose reading list is closest to userA.
+These are users with similar tastes to userA.
 . Recommend content based on what the users in step 2 have read, that userA has not yet read.
 
-Look closely at step 2. In large graphs, step 2 can lead to a very large traversal. This is because userA may have viewed content that has been viewed by millions of other people. We may want to skip these high frequency nodes for two reasons:
+Look closely at step 2.
+In large graphs, step 2 can lead to a very large traversal.
+This is because userA may have viewed content that has been viewed by millions of other people.
+We may want to skip these high frequency nodes for two reasons:
 
 . A large traversal that visits millions of unique nodes is slow and takes a lot of memory because cycle detection is tracked in memory.
-. High frequency nodes are also not useful in determining users with similar tastes. The content that fewer people have viewed provides a more precise recommendation.
+. High frequency nodes are also not useful in determining users with similar tastes.
+The content that fewer people have viewed provides a more precise recommendation.
 
-The `nodes` function has the `maxDocFreq` parameter to allow for filtering out high frequency nodes. The sample code below shows steps 1 and 2 of the recommendation:
+The `nodes` function has the `maxDocFreq` parameter to allow for filtering out high frequency nodes.
+The sample code below shows steps 1 and 2 of the recommendation:
 
 [source,plain]
 ----
@@ -266,13 +315,20 @@ The `nodes` function has the `maxDocFreq` parameter to allow for filtering out h
        count(*)))
 ----
 
-In the example above, the inner search expression searches the `logs` collection and returning all the articles viewed by "user1". The outer `nodes` expression takes all the articles emitted from the inner search expression and finds all the records in the logs collection for those articles. It then gathers and aggregates the users that have read the articles. The `maxDocFreq` parameter limits the articles returned to those that appear in no more then 10,000 log records (per shard). This [...]
+In the example above, the inner search expression searches the `logs` collection and returns all the articles viewed by "user1".
+The outer `nodes` expression takes all the articles emitted from the inner search expression and finds all the records in the logs collection for those articles.
+It then gathers and aggregates the users that have read the articles.
+The `maxDocFreq` parameter limits the articles returned to those that appear in no more than 10,000 log records (per shard).
+This guards against returning articles that have been viewed by millions of users.
 
 == Tracking the Traversal
 
-By default the `nodes` function only tracks enough information to do cycle detection. This provides enough information to output the nodes and aggregations in the graph.
+By default the `nodes` function only tracks enough information to do cycle detection.
+This provides enough information to output the nodes and aggregations in the graph.
 
-For some use cases, such as graph visualization, we also need to output the edges. Setting `trackTraversal="true"` tells `nodes` to track the connections between nodes, so the edges can be constructed. When `trackTraversal` is enabled a new `ancestors` property will appear with each node. The `ancestors` property contains a list of node IDs that pointed to the node.
+For some use cases, such as graph visualization, we also need to output the edges.
+Setting `trackTraversal="true"` tells `nodes` to track the connections between nodes, so the edges can be constructed.
+When `trackTraversal` is enabled a new `ancestors` property will appear with each node.
+The `ancestors` property contains a list of node IDs that pointed to the node.
 
 Below is a sample `nodes` expression with `trackTraversal` set to true:
 
@@ -290,7 +346,11 @@ nodes(emails,
 
 == Cross-Collection Traversals
 
-Nested `nodes` functions can operate on different SolrCloud collections. This allow traversals to "walk" from one collection to another to gather nodes. Cycle detection does not cross collection boundaries, so nodes collected in one collection will be traversed in a different collection. This was done deliberately to support cross-collection traversals. Note that the output from a cross-collection traversal will likely contain duplicate nodes with different collection attributes.
+Nested `nodes` functions can operate on different SolrCloud collections.
+This allows traversals to "walk" from one collection to another to gather nodes.
+Cycle detection does not cross collection boundaries, so nodes collected in one collection will be traversed in a different collection.
+This was done deliberately to support cross-collection traversals.
+Note that the output from a cross-collection traversal will likely contain duplicate nodes with different collection attributes.
 
 Below is a sample `nodes` expression that traverses from the "emails" collection to the "logs" collection:
 
@@ -307,11 +367,14 @@ nodes(logs,
       gather="contentID")
 ----
 
-The example above finds all people who sent emails with a body that contains "solr rocks". It then finds all the people these people have emailed. Then it traverses to the logs collection and gathers all the content IDs that these people have edited.
+The example above finds all people who sent emails with a body that contains "solr rocks".
+It then finds all the people these people have emailed.
+Then it traverses to the logs collection and gathers all the content IDs that these people have edited.
 
 == Combining nodes With Other Streaming Expressions
 
-The `nodes` function can act as both a stream source and a stream decorator. The connection with the wider stream expression library provides tremendous power and flexibility when performing graph traversals. Here is an example of using the streaming expression library to intersect two friend networks:
+The `nodes` function can act as both a stream source and a stream decorator.
+The connection with the wider stream expression library provides tremendous power and flexibility when performing graph traversals.
+Here is an example of using the streaming expression library to intersect two friend networks:
 
 [source,plain]
 ----
@@ -334,13 +397,18 @@ The `nodes` function can act as both a stream source and a stream decorator. The
                                   scatter="branches,leaves")))
 ----
 
-The example above gathers two separate friend networks, one rooted with "\johndoe@apache.org" and another rooted with "\janedoe@apache.org". The friend networks are then sorted by the `node` field, and intersected. The resulting node set will be the intersection of the two friend networks.
+The example above gathers two separate friend networks, one rooted with "\johndoe@apache.org" and another rooted with "\janedoe@apache.org".
+The friend networks are then sorted by the `node` field, and intersected.
+The resulting node set will be the intersection of the two friend networks.
 
 == Sample Use Cases for Graph Traversal
 
 === Calculate Market Basket Co-occurrence
 
-It is often useful to know which products are most frequently purchased with a particular product. This example uses a simple market basket table (indexed in Solr) to store past shopping baskets. The schema for the table is very simple with each row containing a `basketID` and a `productID`. This can be seen as a graph with each row in the table representing an edge. And it can be traversed very quickly to calculate basket co-occurrence, even when the graph contains billions of edges.
+It is often useful to know which products are most frequently purchased with a particular product.
+This example uses a simple market basket table (indexed in Solr) to store past shopping baskets.
+The schema for the table is very simple with each row containing a `basketID` and a `productID`.
+This can be seen as a graph with each row in the table representing an edge.
+And it can be traversed very quickly to calculate basket co-occurrence, even when the graph contains billions of edges.
 
 Here is the sample syntax:
 
@@ -358,21 +426,32 @@ top(n="5",
 
 Let's break down exactly what this traversal is doing.
 
-. The first expression evaluated is the inner `random` expression, which returns 500 random basketIDs, from the `baskets` collection, that have the `productID` "ABC". The `random` expression is very useful for recommendations because it limits the traversal to a fixed set of baskets, and because it adds the element of surprise into the recommendation. Using the `random` function you can provide fast sample sets from very large graphs.
-. The outer `nodes` expression finds all the records in the `baskets` collection for the basketIDs generated in step 1. It also filters out `productID` "ABC" so it doesn't show up in the results. It then gathers and counts the productID's across these baskets.
+. The first expression evaluated is the inner `random` expression, which returns 500 random basketIDs from the `baskets` collection that have the `productID` "ABC".
+The `random` expression is very useful for recommendations because it limits the traversal to a fixed set of baskets, and because it adds an element of surprise into the recommendation.
+Using the `random` function you can provide fast sample sets from very large graphs.
+. The outer `nodes` expression finds all the records in the `baskets` collection for the basketIDs generated in step 1.
+It also filters out `productID` "ABC" so it doesn't show up in the results.
+It then gathers and counts the productIDs across these baskets.
 . The outer `top` expression ranks the productIDs emitted in step 2 by the count and selects the top 5.
 
 In a nutshell this expression finds the products that most frequently co-occur with product "ABC" in past shopping baskets.
 
 === Using the scoreNodes Function to Make a Recommendation
 
-This use case builds on the market basket example <<Calculate Market Basket Co-occurrence,above>> that calculates which products co-occur most frequently with productID:ABC. The ranked co-occurrence counts provide candidates for a recommendation. The `scoreNodes` function can be used to score the candidates to find the best recommendation.
+This use case builds on the market basket example <<Calculate Market Basket Co-occurrence,above>> that calculates which products co-occur most frequently with productID:ABC.
+The ranked co-occurrence counts provide candidates for a recommendation.
+The `scoreNodes` function can be used to score the candidates to find the best recommendation.
 
-Before diving into the syntax of the `scoreNodes` function it's useful to understand why the raw co-occurrence counts may not produce the best recommendation. The reason is that raw co-occurrence counts favor items that occur frequently across all baskets. A better recommendation would find the product that has the most significant relationship with productID ABC. The `scoreNodes` function uses a term frequency-inverse document frequency (TF-IDF) algorithm to find the most significant re [...]
+Before diving into the syntax of the `scoreNodes` function it's useful to understand why the raw co-occurrence counts may not produce the best recommendation.
+The reason is that raw co-occurrence counts favor items that occur frequently across all baskets.
+A better recommendation would find the product that has the most significant relationship with productID ABC.
+The `scoreNodes` function uses a term frequency-inverse document frequency (TF-IDF) algorithm to find the most significant relationship.
 
 ==== How scoreNodes Works
 
-The `scoreNodes` function assigns a score to each node emitted by the nodes expression. By default the `scoreNodes` function uses the `count(*)` aggregation, which is the co-occurrence count, as the TF value. The IDF value for each node is fetched from the collection where the node was gathered. Each node is then scored using the TF*IDF formula, which provides a boost to nodes with a lower frequency across all market baskets.
+The `scoreNodes` function assigns a score to each node emitted by the nodes expression.
+By default the `scoreNodes` function uses the `count(*)` aggregation, which is the co-occurrence count, as the TF value.
+The IDF value for each node is fetched from the collection where the node was gathered.
+Each node is then scored using the TF*IDF formula, which provides a boost to nodes with a lower frequency across all market baskets.
 
 Combining the co-occurrence count with the IDF provides a score that shows how important the relationship is between productID ABC and the recommendation candidates.
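+
+Conceptually, the score for each gathered node behaves like the sketch below, where `docFreq` is the node's document frequency in the index and `numDocs` is the total number of documents (this is a rough illustration of the TF-IDF idea, not the exact formula used by the implementation):
+
+[source,text]
+----
+nodeScore ~= count(*) * log(numDocs / docFreq)
+----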
 
@@ -396,13 +475,18 @@ top(n="1",
 
 This example builds on the earlier example "Calculate market basket co-occurrence".
 
-. Notice that the inner-most `top` function is taking the top 50 products that co-occur most frequently with productID ABC. This provides 50 candidate recommendations.
+. Notice that the inner-most `top` function is taking the top 50 products that co-occur most frequently with productID ABC.
+This provides 50 candidate recommendations.
 . The `scoreNodes` function then assigns a score to the candidates based on the TF*IDF of each node.
-. The outer `top` expression selects the highest scoring node. This is the recommendation.
+. The outer `top` expression selects the highest scoring node.
+This is the recommendation.
 
 === Recommend Content Based on Collaborative Filter
 
-In this example we'll recommend content for a user based on a collaborative filter. This recommendation is made using log records that contain the `userID` and `articleID` and the action performed. In this scenario each log record can be viewed as an edge in a graph. The userID and articleID are the nodes and the action is an edge property used to filter the traversal.
+In this example we'll recommend content for a user based on a collaborative filter.
+This recommendation is made using log records that contain the `userID` and `articleID` and the action performed.
+In this scenario each log record can be viewed as an edge in a graph.
+The userID and articleID are the nodes and the action is an edge property used to filter the traversal.
 
 Here is the sample syntax:
 
@@ -428,21 +512,34 @@ top(n="5",
 
 Let's break down the expression above step-by-step.
 
-. The first expression evaluated is the inner `search` expression. This expression searches the `logs` collection for all records matching "user1". This is the user we are making the recommendation for.
+. The first expression evaluated is the inner `search` expression.
+This expression searches the `logs` collection for all records matching "user1".
+This is the user we are making the recommendation for.
 +
-There is a filter applied to pull back only records where the "action:read". It returns the `articleID` for each record found. In other words, this expression returns all the articles "user1" has read.
-. The inner `nodes` expression operates over the articleIDs returned from step 1. It takes each `articleID` found and searches them against the `articleID` field.
+There is a filter applied to pull back only records that match "action:read".
+It returns the `articleID` for each record found.
+In other words, this expression returns all the articles "user1" has read.
+. The inner `nodes` expression operates over the articleIDs returned from step 1.
+It takes each `articleID` found and searches them against the `articleID` field.
 +
-Note that it skips high frequency nodes using the `maxDocFreq` parameter to filter out articles that appear over 10,000 times in the logs. It gathers userIDs and aggregates the counts for each user. This step finds the users that have read the same articles that "user1" has read and counts how many of the same articles they have read.
-. The inner `top` expression ranks the users emitted from step 2. It will emit the top 30 users who have the most overlap with user1's reading list.
-. The outer `nodes` expression gathers the reading list for the users emitted from step 3. It counts the articleIDs that are gathered.
+Note that it skips high frequency nodes using the `maxDocFreq` parameter to filter out articles that appear over 10,000 times in the logs.
+It gathers userIDs and aggregates the counts for each user.
+This step finds the users that have read the same articles that "user1" has read and counts how many of the same articles they have read.
+. The inner `top` expression ranks the users emitted from step 2.
+It will emit the top 30 users who have the most overlap with user1's reading list.
+. The outer `nodes` expression gathers the reading list for the users emitted from step 3.
+It counts the articleIDs that are gathered.
 +
-Any article selected in step 1 (user1 reading list), will not appear in this step due to cycle detection. So this step returns the articles read by the users with the most similar readings habits to "user1" that "user1" has not read yet. It also counts the number of times each article has been read across this user group.
-. The outer `top` expression takes the top articles emitted from step 4. This is the recommendation.
+Any article selected in step 1 (user1's reading list) will not appear in this step due to cycle detection.
+So this step returns the articles read by the users with the most similar reading habits to "user1" that "user1" has not read yet.
+It also counts the number of times each article has been read across this user group.
+. The outer `top` expression takes the top articles emitted from step 4.
+This is the recommendation.
 
 === Protein Pathway Traversal
 
-In recent years, scientists have become increasingly able to rationally design drugs that target the mutated proteins, called oncogenes, responsible for some cancers. Proteins typically act through long chains of chemical interactions between multiple proteins, called pathways, and, while the oncogene in the pathway may not have a corresponding drug, another protein in the pathway may. Graph traversal on a protein collection that records protein interactions and drugs may yield possible  [...]
+In recent years, scientists have become increasingly able to rationally design drugs that target the mutated proteins, called oncogenes, responsible for some cancers.
+Proteins typically act through long chains of chemical interactions between multiple proteins, called pathways, and, while the oncogene in the pathway may not have a corresponding drug, another protein in the pathway may.
+Graph traversal on a protein collection that records protein interactions and drugs may yield possible candidates.
+(Thanks to Lewis Geer of the NCBI for providing this example.)
 
 The example below illustrates a protein pathway traversal:
 
@@ -458,23 +555,36 @@ nodes(proteins,
 
 Let's break down exactly what this traversal is doing.
 
-. The inner `nodes` expression traverses in the `proteins` collection. It finds all the edges in the graph where the name of the protein is "NRAS". Then it gathers the proteins in the `interacts` field. This gathers all the proteins that "NRAS" interactions with.
-. The outer `nodes` expression also works with the `proteins` collection. It gathers all the drugs that correspond to proteins emitted from step 1.
+. The inner `nodes` expression traverses in the `proteins` collection.
+It finds all the edges in the graph where the name of the protein is "NRAS".
+Then it gathers the proteins in the `interacts` field.
+This gathers all the proteins that "NRAS" interacts with.
+. The outer `nodes` expression also works with the `proteins` collection.
+It gathers all the drugs that correspond to proteins emitted from step 1.
 . Using this stepwise approach you can gather the drugs along the pathway of interactions any number of steps away from the root protein.
 
 == Exporting GraphML to Support Graph Visualization
 
-In the examples above, the `nodes` expression was sent to Solr's `/stream` handler like any other streaming expression. This approach outputs the nodes in the same JSON tuple format as other streaming expressions so that it can be treated like any other streaming expression. You can use the `/stream` handler when you need to operate directly on the tuples, such as in the recommendation use cases above.
+In the examples above, the `nodes` expression was sent to Solr's `/stream` handler like any other streaming expression.
+This approach outputs the nodes in the same JSON tuple format as other streaming expressions so that it can be treated like any other streaming expression.
+You can use the `/stream` handler when you need to operate directly on the tuples, such as in the recommendation use cases above.
 
-There are other graph traversal use cases that involve graph visualization. Solr supports these use cases with the introduction of the `/graph` request handler, which takes a `nodes` expression and outputs the results in GraphML.
+There are other graph traversal use cases that involve graph visualization.
+Solr supports these use cases with the introduction of the `/graph` request handler, which takes a `nodes` expression and outputs the results in GraphML.
 
-http://graphml.graphdrawing.org/[GraphML] is an XML format supported by graph visualization tools such as https://gephi.org/[Gephi], which is a sophisticated open source tool for statistically analyzing and visualizing graphs. Using a `nodes` expression, parts of a larger graph can be exported in GraphML and then imported into tools like Gephi.
+http://graphml.graphdrawing.org/[GraphML] is an XML format supported by graph visualization tools such as https://gephi.org/[Gephi], which is a sophisticated open source tool for statistically analyzing and visualizing graphs.
+Using a `nodes` expression, parts of a larger graph can be exported in GraphML and then imported into tools like Gephi.
 
 There are a few things to keep mind when exporting a graph in GraphML:
 
-. The `/graph` handler can export both the nodes and edges in the graph. By default, it only exports the nodes. To export the edges you must set `trackTraversal="true"` in the `nodes` expression.
-. The `/graph` handler currently accepts an arbitrarily complex streaming expression which includes a `nodes` expression. If the streaming expression doesn't include a `nodes` expression, the `/graph` handler will not properly output GraphML.
-. The `/graph` handler currently accepts a single arbitrarily complex, nested `nodes` expression per request. This means you cannot send in a streaming expression that joins or intersects the node sets from multiple `nodes` expressions. The `/graph` handler does support any level of nesting within a single `nodes` expression. The `/stream` handler does support joining and intersecting node sets, but the `/graph` handler currently does not.
+. The `/graph` handler can export both the nodes and edges in the graph.
+By default, it only exports the nodes.
+To export the edges you must set `trackTraversal="true"` in the `nodes` expression.
+. The `/graph` handler currently accepts an arbitrarily complex streaming expression which includes a `nodes` expression.
+If the streaming expression doesn't include a `nodes` expression, the `/graph` handler will not properly output GraphML.
+. The `/graph` handler currently accepts a single arbitrarily complex, nested `nodes` expression per request.
+This means you cannot send in a streaming expression that joins or intersects the node sets from multiple `nodes` expressions.
+The `/graph` handler does support any level of nesting within a single `nodes` expression.
+The `/stream` handler does support joining and intersecting node sets, but the `/graph` handler currently does not.
 
 === Sample GraphML Request
 
diff --git a/solr/solr-ref-guide/src/graph.adoc b/solr/solr-ref-guide/src/graph.adoc
index 8d7294f..f91506f 100644
--- a/solr/solr-ref-guide/src/graph.adoc
+++ b/solr/solr-ref-guide/src/graph.adoc
@@ -17,24 +17,21 @@
 // under the License.
 
 
-This section of the user guide covers the syntax and theory behind *graph expressions*. Examples are presented for two key graph use cases: *bipartite graph recommenders* and *event correlation* with
-*temporal graph queries*.
+This section of the user guide covers the syntax and theory behind *graph expressions*.
+Examples are presented for two key graph use cases: *bipartite graph recommenders* and *event correlation* with *temporal graph queries*.
 
 == Graphs
 
 Log records and other data indexed in Solr have connections between them that can be seen as a distributed graph.
 Graph expressions provide a mechanism for identifying root nodes in the graph and walking their connections.
-The general goal of the graph walk is to materialize a specific *subgraph* and perform *link analysis* to understand
-the connections between nodes.
+The general goal of the graph walk is to materialize a specific *subgraph* and perform *link analysis* to understand the connections between nodes.
 
 In the next few sections below we'll review the graph theory behind Solr's graph expressions.
 
 === Subgraphs
 
-A subgraph is a smaller subset of the nodes and connections of the
-larger graph.
-Graph expressions allow you to flexibly define and materialize a subgraph from the larger graph
-stored in the distributed index.
+A subgraph is a smaller subset of the nodes and connections of the larger graph.
+Graph expressions allow you to flexibly define and materialize a subgraph from the larger graph stored in the distributed index.
 
 Subgraphs play two important roles:
 
@@ -46,19 +43,14 @@ The design of the subgraph defines the meaning of the link analysis.
 === Bipartite Subgraphs
 
 Graph expressions can be used to materialize *bipartite subgraphs*.
-A bipartite graph is a graph where the nodes are split into two
-distinct categories.
-The links between those two categories can then
-be analyzed to study how they relate.
-Bipartite graphs are often discussed
-in the context of collaborative filter recommender systems.
+A bipartite graph is a graph where the nodes are split into two distinct categories.
+The links between those two categories can then be analyzed to study how they relate.
+Bipartite graphs are often discussed in the context of collaborative filter recommender systems.
 
 A bipartite graph between *shopping baskets* and *products* is a useful example.
-Through link analysis between the shopping baskets and products
-we can determine which products are most often purchased within the same shopping baskets.
+Through link analysis between the shopping baskets and products we can determine which products are most often purchased within the same shopping baskets.
 
-In the example below there is a Solr collection called baskets
-with three fields:
+In the example below there is a Solr collection called baskets with three fields:
 
 *id*: Unique ID
 
@@ -70,10 +62,9 @@ Each record in the collection represents a product in a shopping basket.
 All products in the same basket share the same basket ID.
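+
+To make this concrete, a handful of records might look like the sketch below (the `basket_s` field name for the basket ID is assumed here; the `product_s` field is referenced later in this section):
+
+[source,json]
+----
+[
+  {"id": "1", "basket_s": "basket1", "product_s": "butter"},
+  {"id": "2", "basket_s": "basket1", "product_s": "eggs"},
+  {"id": "3", "basket_s": "basket2", "product_s": "butter"},
+  {"id": "4", "basket_s": "basket2", "product_s": "milk"}
+]
+----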
 
 Let's consider a simple example where we want to find a product
-that is often sold with *butter*. In order to do this we could create a
-*bipartite subgraph* of shopping baskets that contain *butter*.
-We won't include butter itself in the graph as it doesn't help with
-finding a complementary product for butter.
+that is often sold with *butter*.
+In order to do this we could create a *bipartite subgraph* of shopping baskets that contain *butter*.
+We won't include butter itself in the graph as it doesn't help with finding a complementary product for butter.
 
 Below is an example of this bipartite subgraph represented as a matrix:
 
@@ -127,8 +118,7 @@ The `trackTraversal` flag tells the nodes expression to track the links between
 The output of the nodes function is a *node set* that represents the subgraph specified by the nodes function.
 The node set contains a unique set of nodes that are gathered during the graph walk.
 The `node` property in the result is the value of the gathered node.
-In the shopping basket example the `product_s` field is in the node property
-because that was what was specified to be gathered in the nodes expression.
+In the shopping basket example the `product_s` field is in the node property because that was what was specified to be gathered in the nodes expression.
 
 The output of the shopping basket graph expression is as follows:
 [source,json]
@@ -174,23 +164,19 @@ The output of the shopping basket graph expression is as follows:
 }
 ----
 
-The `ancestors` property in the result contains a unique, alphabetically sorted set of all the *inbound links*
-to the node in the subgraph.
+The `ancestors` property in the result contains a unique, alphabetically sorted set of all the *inbound links* to the node in the subgraph.
 In this case it shows the baskets that are linked to each product.
 The ancestor links will only be tracked when the trackTraversal flag is turned on in the nodes expression.
 
 === Link Analysis and Degree Centrality
 
-Link analysis is often performed to determine *node centrality*. When analyzing for centrality the
-goal is to assign a weight to each node based on how connected it is in the subgraph.
+Link analysis is often performed to determine *node centrality*.
+When analyzing for centrality the goal is to assign a weight to each node based on how connected it is in the subgraph.
 There are different types of node centrality.
-Graph expressions very efficiently calculates
-*inbound degree centrality* (in-degree).
+Graph expressions very efficiently calculate *inbound degree centrality* (in-degree).
 
-Inbound degree centrality is calculated by counting the number of inbound
-links to each node.
-For simplicity this document will sometimes refer
-to inbound degree simply as degree.
+Inbound degree centrality is calculated by counting the number of inbound links to each node.
+For simplicity this document will sometimes refer to inbound degree simply as degree.
 
 Back to the shopping basket example:
 
@@ -204,8 +190,7 @@ eggs:   2
 milk:   2
 ----
 
-From the degree calculation we know that *eggs* and *milk* appear more frequently in shopping baskets with
-butter than *cheese* does.
+From the degree calculation we know that *eggs* and *milk* appear more frequently in shopping baskets with butter than *cheese* does.
 
 The nodes function can calculate degree centrality by adding the `count(*)` aggregation as shown below:
 
@@ -270,10 +255,8 @@ The output of this graph expression is as follows:
 
 The `count(+++*+++)` aggregation counts the "gathered" nodes, in this case the values in the `product_s` field.
 Notice that the `count(+++*+++)` result is the same as the number of ancestors.
-This will always be the case because the nodes function first deduplicates the edges before
-counting the gathered nodes.
-Because of this the `count(+++*+++)` aggregation always calculates the
-inbound degree centrality for the gathered nodes.
+This will always be the case because the nodes function first deduplicates the edges before counting the gathered nodes.
+Because of this the `count(+++*+++)` aggregation always calculates the inbound degree centrality for the gathered nodes.
 
 === Dot Product
 
@@ -288,20 +271,16 @@ This tells us that a nearest neighbor search, using a maximum inner product simi
 === Limiting Basket Out-Degree
 
 The recommendation can be made stronger by limiting the *out-degree* of the baskets.
-The out-degree is the
-number of outbound links of a node in a graph.
-In the shopping basket example the outbound links
-from the baskets link to products.
+The out-degree is the number of outbound links of a node in a graph.
+In the shopping basket example the outbound links from the baskets link to products.
 So limiting the out-degree will limit the size of the baskets.
 
-Why does limiting the size of the shopping baskets make a stronger recommendation? To answer this question it helps
-to think about each shopping basket as *voting* for products that go with *butter*. In an election with two candidates
-if you were to vote for both candidates the votes would cancel each other out and have no effect.
+Why does limiting the size of the shopping baskets make a stronger recommendation?
+To answer this question it helps to think about each shopping basket as *voting* for products that go with *butter*.
+In an election with two candidates if you were to vote for both candidates the votes would cancel each other out and have no effect.
 But if you vote for only one candidate your vote will affect the outcome.
-The same principle holds true
-for recommendations.
-As a basket votes for more products it dilutes the strength of its recommendation for any
-one product.
+The same principle holds true for recommendations.
+As a basket votes for more products it dilutes the strength of its recommendation for any one product.
 A basket with just butter and one other item more strongly recommends that item.
 
 The `maxDocFreq` parameter can be used to limit the graph "walk" to only include baskets that appear in the index a certain number of times.
@@ -327,18 +306,14 @@ nodes(baskets,
 === Node Scoring
 
 The degree of the node describes how many nodes in the subgraph link to it.
-But this does not tell us if the node is particularly central to this subgraph or if it is just a
-very frequent node in the entire graph.
-Nodes that appear frequently in the subgraph but
-infrequently in the entire graph can be considered more *relevant* to the subgraph.
+But this does not tell us if the node is particularly central to this subgraph or if it is just a very frequent node in the entire graph.
+Nodes that appear frequently in the subgraph but infrequently in the entire graph can be considered more *relevant* to the subgraph.
 
 The search index contains information about how frequently each node appears in the entire index.
-Using a technique similar to *tf-idf* document scoring, graph expressions can combine the
-degree of the node with its inverse document frequency in the index to determine a relevancy score.
+Using a technique similar to *tf-idf* document scoring, graph expressions can combine the degree of the node with its inverse document frequency in the index to determine a relevancy score.
 
 The `scoreNodes` function scores the nodes.
-Below is an example of the scoreNodes function applied to
-the shopping basket node set.
+Below is an example of the scoreNodes function applied to the shopping basket node set.
 
 [source,text]
 ----
@@ -352,13 +327,10 @@ scoreNodes(nodes(baskets,
 ----
 
 The output now includes a `nodeScore` property.
-In the output below notice how *eggs* has a higher
-nodeScore than *milk* even though they have the same `count(+++*+++)`. This is because milk appears more
-frequently in the entire index than eggs does.
-The `docFreq` property added by the `nodeScore` function
-shows the document frequency in the index.
-Because of the lower `docFreq` eggs is considered more relevant
-to this subgraph, and a better recommendation to be paired with butter.
+In the output below, notice how *eggs* has a higher nodeScore than *milk* even though they have the same `count(+++*+++)`.
+This is because milk appears more frequently in the entire index than eggs does.
+The `docFreq` property added by the `scoreNodes` function shows the document frequency in the index.
+Because of the lower `docFreq`, eggs is considered more relevant to this subgraph, and a better recommendation to be paired with butter.
 
 [source,json]
 ----
@@ -418,10 +390,8 @@ to this subgraph, and a better recommendation to be paired with butter.
 == Temporal Graph Expressions
 
 The examples above lay the groundwork for temporal graph queries.
-Temporal graph queries allow the `nodes` function to walk the graph using *windows of time* to surface
-*cross-correlations* within the data.
-The nodes function currently supports graph walks using *ten second increments*
-which is useful for *event correlation* and *root cause analysis* in log analytics.
+Temporal graph queries allow the `nodes` function to walk the graph using *windows of time* to surface *cross-correlations* within the data.
+The nodes function currently supports graph walks using *ten second increments*, which is useful for *event correlation* and *root cause analysis* in log analytics.
 
 In order to support temporal graph queries a ten second truncated timestamp in *ISO 8601* format must be added to the log records as a string field at indexing time.
 Here is a sample ten second truncated timestamp: `2021-02-10T20:51:30Z`.
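+
+The truncation simply zeroes out the last digit of the seconds (and drops any fractional seconds), so raw timestamps map onto ten second buckets; the raw values below are made up for illustration:
+
+[source,text]
+----
+2021-02-10T20:51:34.361Z  ->  2021-02-10T20:51:30Z
+2021-02-10T20:51:39.017Z  ->  2021-02-10T20:51:30Z
+2021-02-10T20:51:41.250Z  ->  2021-02-10T20:51:40Z
+----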
@@ -432,13 +402,11 @@ So those using Solr to analyze Solr logs get temporal graph expressions for free
 
 === Root Events
 
-Once the ten second windows have been indexed with the log records we can devise a query that
-creates a set of *root events*. We can demonstrate this with an example using Solr log records.
+Once the ten second windows have been indexed with the log records we can devise a query that creates a set of *root events*.
+We can demonstrate this with an example using Solr log records.
 
-In this example we'll perform a Streaming Expression `facet` aggregation that finds the top 10, ten second windows
-with the highest average query time.
-These time windows can be used to represent *slow query events* in a temporal
-graph query.
+In this example we'll perform a Streaming Expression `facet` aggregation that finds the top 10 ten second windows with the highest average query time.
+These time windows can be used to represent *slow query events* in a temporal graph query.
 
 Here is the facet function:
 
@@ -494,12 +462,10 @@ Below is a snippet of the results with the 25 windows with the highest average q
 ----
 === Temporal Bipartite Subgraphs
 
-Once we've identified a set of root events it's easy to perform a graph query that creates a
-bipartite graph of the log events types that occurred within the same ten second windows.
+Once we've identified a set of root events it's easy to perform a graph query that creates a bipartite graph of the log event types that occurred within the same ten second windows.
 With Solr logs there is a field called `type_s` which is the type of log event.
 
-In order to see what log events happened in the same ten second window of our root events we can "walk" the
-ten second windows and gather the `type_s` field.
+In order to see what log events happened in the same ten second window of our root events we can "walk" the ten second windows and gather the `type_s` field.
 
 [source,text]
 ----
@@ -565,25 +531,18 @@ Below is the resulting node set:
 }
 ----
 
-In this result set the `node` field holds the type of log events that occurred within the
-same ten second windows as the root events.
-Notice that the event types include:
-query, admin, update and error.
-The `count(+++*+++)` shows the degree centrality of the different
-log event types.
+In this result set the `node` field holds the type of log events that occurred within the same ten second windows as the root events.
+Notice that the event types include: query, admin, update and error.
+The `count(+++*+++)` shows the degree centrality of the different log event types.
 
 Notice that there is only one *error* event within the same ten second windows of the slow query events.
 
 === Window Parameter
 
-For event correlation and root cause analysis it's not enough to find events that occur
-within the *same* ten second root event windows.
-What's needed is to find events that occur
-within a window of time *prior to each root event*. The `window` parameter allows you to
-specify this prior window of time as part of the query.
-The window parameter is an integer
-which specifies the number of ten second time windows, prior to each root event window,
-to include in the graph walk.
+For event correlation and root cause analysis it's not enough to find events that occur within the *same* ten second root event windows.
+What's needed is to find events that occur within a window of time *prior to each root event*.
+The `window` parameter allows you to specify this prior window of time as part of the query.
+The window parameter is an integer which specifies the number of ten second time windows, prior to each root event window, to include in the graph walk.
 
 [source,text]
 ----
@@ -652,42 +611,29 @@ Notice that there are *now 29 error* events within the 3 ten second windows prio
 
 === Degree as a Representation of Correlation
 
-By performing link analysis on the temporal bipartite graph we can calculate the
-degree of each event type that occurs in the specified time windows.
-We established in the bipartite graph recommender example the direct relationship between
-*inbound degree* and the *dot product*. In the field of digital signal processing the
-dot product is used to represent *correlation*.
-In our temporal graph queries we can then view the inbound degree as a
-representation of correlation between the root events and the events that
-occur within the specified time windows.
+By performing link analysis on the temporal bipartite graph we can calculate the degree of each event type that occurs in the specified time windows.
+We established in the bipartite graph recommender example the direct relationship between *inbound degree* and the *dot product*.
+In the field of digital signal processing the dot product is used to represent *correlation*.
+In our temporal graph queries we can then view the inbound degree as a representation of correlation between the root events and the events that occur within the specified time windows.
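+
+As a small, made-up illustration of this relationship, treat each root event window as one position in a vector, mark every root window with a 1, and mark an event type with a 1 in each window where it occurs:
+
+[source,text]
+----
+root windows:   w1  w2  w3  w4
+root vector:     1   1   1   1
+error vector:    1   0   0   1
+
+dot product = 1*1 + 1*0 + 1*0 + 1*1 = 2
+----
+
+Walking those same four windows and gathering event types gives the *error* node an inbound degree of 2, which is exactly the quantity the dot product measures.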
 
 === Lag Parameter
 
 Understanding the *lag* in the correlation is important for certain use cases.
 In a lagged correlation an event occurs and following a *delay* another event occurs.
-The window parameter doesn't capture the delay as we only know that an event
-occurred somewhere within a prior window.
-
-The `lag` parameter can be used to start calculating the window parameter a
-number of ten second windows in the past.
-For example we could walk the graph in 20 second
-windows starting from 30 seconds prior to a set of root events.
-By adjusting the lag and re-running the query we can determine which lagged
-window has the highest degree.
+The window parameter doesn't capture the delay as we only know that an event occurred somewhere within a prior window.
+
+The `lag` parameter can be used to start calculating the window parameter a number of ten second windows in the past.
+For example we could walk the graph in 20 second windows starting from 30 seconds prior to a set of root events.
+By adjusting the lag and re-running the query we can determine which lagged window has the highest degree.
 From this we can determine the delay.
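+
+Below is an annotated sketch of how the two parameters fit together; the collection and field names (`solr_logs`, `tenSecond_s`) and the elided root event stream are assumed from the earlier examples, the values mirror the 20 second window / 30 second lag scenario above, and the trailing notes are not part of the expression syntax:
+
+[source,text]
+----
+nodes(solr_logs,
+      ...,
+      walk="tenSecond_s->tenSecond_s",
+      gather="type_s",
+      window="2",      <- walk two ten second windows...
+      lag="3",         <- ...starting three ten second windows before each root event window
+      count(*))
+----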
 
 === Node Scoring and Temporal Anomaly Detection
 
-The concept of node scoring can be applied to temporal graph queries to find events that are
-both *correlated* with a set of root events and *anomalous* to the root events.
-The degree calculation establishes the correlation between events
-but it does not establish if the event is a very common occurrence in
-the entire graph or specific to the subgraph.
+The concept of node scoring can be applied to temporal graph queries to find events that are both *correlated* with a set of root events and *anomalous* to the root events.
+The degree calculation establishes the correlation between events but it does not establish if the event is a very common occurrence in the entire graph or specific to the subgraph.
 
-The `scoreNodes` functions can be applied to score the nodes based on the degree and the
-commonality of the node's term in the index.
-This will establish whether the event is anomalous to
-the root events.
+The `scoreNodes` function can be applied to score the nodes based on the degree and the commonality of the node's term in the index.
+This will establish whether the event is anomalous to the root events.
 
 [source,text]
 ----
diff --git a/solr/solr-ref-guide/src/hadoop-authentication-plugin.adoc b/solr/solr-ref-guide/src/hadoop-authentication-plugin.adoc
index 532c99b..46c9fe9 100644
--- a/solr/solr-ref-guide/src/hadoop-authentication-plugin.adoc
+++ b/solr/solr-ref-guide/src/hadoop-authentication-plugin.adoc
@@ -149,7 +149,8 @@ The example below uses `ConfigurableInternodeAuthHadoopPlugin`, and hence you mu
 As a result, all internode communication will use the Kerberos mechanism, instead of PKI authentication.
 
 This configuration assumes that your servers are using the `solr` principal, and will be allowed to impersonate any other user with requests coming from any other host.
-For additional security, consider setting the host list to match your cluster nodes. The Hadoop https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/Superusers.html[proxy users documentation] contains more detail about available configuration options.
+For additional security, consider setting the host list to match your cluster nodes.
+The Hadoop https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/Superusers.html[proxy users documentation] contains more detail about available configuration options.
 
 [source,json]
 ----
diff --git a/solr/solr-ref-guide/src/highlighting.adoc b/solr/solr-ref-guide/src/highlighting.adoc
index 32dc185..950a56f 100644
--- a/solr/solr-ref-guide/src/highlighting.adoc
+++ b/solr/solr-ref-guide/src/highlighting.adoc
@@ -18,25 +18,35 @@
 
 Highlighting in Solr allows fragments of documents that match the user's query to be included with the query response.
 
-The fragments are included in a special section of the query response (the `highlighting` section), and the client uses the formatting clues also included to determine how to present the snippets to users. Fragments are a portion of a document field that contains matches from the query and are sometimes also referred to as "snippets" or "passages".
+The fragments are included in a special section of the query response (the `highlighting` section), and the client uses the formatting clues also included to determine how to present the snippets to users.
+Fragments are a portion of a document field that contains matches from the query and are sometimes also referred to as "snippets" or "passages".
 
-Highlighting is extremely configurable, perhaps more than any other part of Solr. There are many parameters each for fragment sizing, formatting, ordering, backup/alternate behavior, and more options that are hard to categorize. Nonetheless, highlighting is very simple to use.
+Highlighting is extremely configurable, perhaps more than any other part of Solr.
+There are many parameters each for fragment sizing, formatting, ordering, backup/alternate behavior, and more options that are hard to categorize.
+Nonetheless, highlighting is very simple to use.
 
 == Usage
 
 === Common Highlighter Parameters
-You only need to set the `hl` and often `hl.fl` parameters to get results. The following table documents these and some other supported parameters. Note that many highlighting parameters support per-field overrides, such as: `f._title_txt_.hl.snippets`
+You only need to set the `hl` and often `hl.fl` parameters to get results.
+The following table documents these and some other supported parameters.
+Note that many highlighting parameters support per-field overrides, such as: `f._title_txt_.hl.snippets`
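+
+For example, a minimal request against the `techproducts` example collection (the collection and field names here are only for illustration) could enable highlighting on the `name` field and override the snippet count for just that field:
+
+[source,text]
+----
+http://localhost:8983/solr/techproducts/select?q=ipod&hl=true&hl.fl=name&f.name.hl.snippets=2
+----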
 
 `hl`::
-Use this parameter to enable or disable highlighting. The default is `false`. If you want to use highlighting, you must set this to `true`.
+Use this parameter to enable or disable highlighting.
+The default is `false`.
+If you want to use highlighting, you must set this to `true`.
 
 `hl.method`::
-The highlighting implementation to use. Acceptable values are: `unified`, `original`, `fastVector`. The default is `original`.
+The highlighting implementation to use.
+Acceptable values are: `unified`, `original`, `fastVector`.
+The default is `original`.
 +
 See the <<Choosing a Highlighter>> section below for more details on the differences between the available highlighters.
 
 `hl.fl`::
-Specifies a list of fields to highlight, either comma- or space-delimited.  These must be "stored".
+Specifies a list of fields to highlight, either comma- or space-delimited.
+These must be "stored".
 A wildcard of `\*` (asterisk) can be used to match field globs, such as `text_*` or even `\*` to highlight on all fields where highlighting is possible.
 When using `*`, consider adding `hl.requireFieldMatch=true`.
 +
@@ -55,42 +65,57 @@ When setting this, you might also need to set `hl.qparser`.
 The default is the value of the `q` parameter (already parsed).
 
 `hl.qparser`::
-The <<query-syntax-and-parsers.adoc#,query parser>> to use for the `hl.q` query.  It only applies when `hl.q` is set.
+The <<query-syntax-and-parsers.adoc#,query parser>> to use for the `hl.q` query.
+It only applies when `hl.q` is set.
 +
 The default is the value of the `defType` parameter which in turn defaults to `lucene`.
 
 `hl.requireFieldMatch`::
-By default, `false`, all query terms will be highlighted for each field to be highlighted (`hl.fl`) no matter what fields the parsed query refer to. If set to `true`, only query terms aligning with the field being highlighted will in turn be highlighted.
+By default, `false`, all query terms will be highlighted for each field to be highlighted (`hl.fl`) no matter what fields the parsed query refers to.
+If set to `true`, only query terms aligning with the field being highlighted will in turn be highlighted.
 +
-If the query references fields different from the field being highlighted and they have different text analysis, the query may not highlight query terms it should have and vice versa. The analysis used is that of the field being highlighted (`hl.fl`), not the query fields.
+If the query references fields different from the field being highlighted and they have different text analysis, the query may not highlight query terms it should have and vice versa.
+The analysis used is that of the field being highlighted (`hl.fl`), not the query fields.
 
 `hl.usePhraseHighlighter`::
-If set to `true`, the default, Solr will highlight phrase queries (and other advanced position-sensitive queries) accurately – as phrases. If `false`, the parts of the phrase will be highlighted everywhere instead of only when it forms the given phrase.
+If set to `true`, the default, Solr will highlight phrase queries (and other advanced position-sensitive queries) accurately – as phrases.
+If `false`, the parts of the phrase will be highlighted everywhere instead of only when it forms the given phrase.
 
 `hl.highlightMultiTerm`::
-If set to `true`, the default, Solr will highlight wildcard queries (and other `MultiTermQuery` subclasses). If `false`, they won't be highlighted at all.
+If set to `true`, the default, Solr will highlight wildcard queries (and other `MultiTermQuery` subclasses).
+If `false`, they won't be highlighted at all.
 
 `hl.snippets`::
-Specifies maximum number of highlighted snippets to generate per field. It is possible for any number of snippets from zero to this value to be generated. The default is `1`.
+Specifies the maximum number of highlighted snippets to generate per field.
+It is possible for any number of snippets from zero to this value to be generated.
+The default is `1`.
 
 `hl.fragsize`::
-Specifies the approximate size, in characters, of fragments to consider for highlighting. The default is `100`. Using `0` indicates that no fragmenting should be considered and the whole field value should be used.
+Specifies the approximate size, in characters, of fragments to consider for highlighting.
+The default is `100`.
+Using `0` indicates that no fragmenting should be considered and the whole field value should be used.
 
 `hl.tag.pre`::
-(`hl.simple.pre` for the Original Highlighter) Specifies the “tag” to use before a highlighted term. This can be any string, but is most often an HTML or XML tag.
+(`hl.simple.pre` for the Original Highlighter) Specifies the “tag” to use before a highlighted term.
+This can be any string, but is most often an HTML or XML tag.
 +
 The default is `<em>`.
 
 `hl.tag.post`::
-(`hl.simple.post` for the Original Highlighter) Specifies the “tag” to use after a highlighted term. This can be any string, but is most often an HTML or XML tag.
+(`hl.simple.post` for the Original Highlighter) Specifies the “tag” to use after a highlighted term.
+This can be any string, but is most often an HTML or XML tag.
 +
 The default is `</em>`.
 
 `hl.encoder`::
-If blank, the default, then the stored text will be returned without any escaping/encoding performed by the highlighter. If set to `html` then special HTML/XML characters will be encoded (e.g., `&` becomes `\&amp;`). The pre/post snippet characters are never encoded.
+If blank, the default, then the stored text will be returned without any escaping/encoding performed by the highlighter.
+If set to `html` then special HTML/XML characters will be encoded (e.g., `&` becomes `\&amp;`).
+The pre/post snippet characters are never encoded.
 
 `hl.maxAnalyzedChars`::
-The character limit to look for highlights, after which no highlighting will be done. This is mostly only a performance concern for an _analysis_ based offset source since it's the slowest. See <<Schema Options and Performance Considerations>>.
+The character limit to look for highlights, after which no highlighting will be done.
+This is mostly only a performance concern for an _analysis_ based offset source since it's the slowest.
+See <<Schema Options and Performance Considerations>>.
 +
 The default is `51200` characters.
 
@@ -98,7 +123,8 @@ There are more parameters supported as well depending on the highlighter (via `h
 
 === Highlighting in the Query Response
 
-In the response to a query, Solr includes highlighting data in a section separate from the documents. It is up to a client to determine how to process this response and display the highlights to users.
+In the response to a query, Solr includes highlighting data in a section separate from the documents.
+It is up to a client to determine how to process this response and display the highlights to users.
 
 Using the example documents included with Solr, we can see how this might work:
 
@@ -135,15 +161,20 @@ we get a response such as this (truncated slightly for space):
 }
 ----
 
-Note the two sections `docs` and `highlighting`. The `docs` section contains the fields of the document requested with the `fl` parameter of the query (only "id", "name", "manu", and "cat").
+Note the two sections `docs` and `highlighting`.
+The `docs` section contains the fields of the document requested with the `fl` parameter of the query (only "id", "name", "manu", and "cat").
 
-The `highlighting` section includes the ID of each document, and the field that contains the highlighted portion. In this example, we used the `hl.fl` parameter to say we wanted query terms highlighted in the "manu" field. When there is a match to the query term in that field, it will be included for each document ID in the list.
+The `highlighting` section includes the ID of each document, and the field that contains the highlighted portion.
+In this example, we used the `hl.fl` parameter to say we wanted query terms highlighted in the "manu" field.
+When there is a match to the query term in that field, it will be included for each document ID in the list.
 
 == Choosing a Highlighter
 
-Solr provides a `HighlightComponent` (a <<requesthandlers-searchcomponents.adoc#defining-search-components,`SearchComponent`>>) and it's in the default list of components for search handlers. It offers a somewhat unified API over multiple actual highlighting implementations (or simply "highlighters") that do the business of highlighting.
+Solr provides a `HighlightComponent` (a <<requesthandlers-searchcomponents.adoc#defining-search-components,`SearchComponent`>>) and it's in the default list of components for search handlers.
+It offers a somewhat unified API over multiple actual highlighting implementations (or simply "highlighters") that do the business of highlighting.
 
-There are many parameters supported by more than one highlighter, and sometimes the implementation details and semantics will be a bit different, so don't expect identical results when switching highlighters. You should use the `hl.method` parameter to choose a highlighter but it's also possible to explicitly configure an implementation by class name in `solrconfig.xml`.
+There are many parameters supported by more than one highlighter, and sometimes the implementation details and semantics will be a bit different, so don't expect identical results when switching highlighters.
+You should use the `hl.method` parameter to choose a highlighter but it's also possible to explicitly configure an implementation by class name in `solrconfig.xml`.
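+
+For example, to try the Unified Highlighter on an otherwise unchanged request you could add:
+
+[source,text]
+----
+hl=true&hl.method=unified
+----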
 
 There are four highlighters available that can be chosen at runtime with the `hl.method` parameter, in order of general recommendation:
 
@@ -172,57 +203,74 @@ The "alternate" fallback options are more primitive.
 The Original Highlighter, sometimes called the "Standard Highlighter" or "Default Highlighter", is Lucene's original highlighter – a venerable option with a high degree of customization options.
 Its query accuracy is good enough for most needs, although it's not quite as good/perfect as the Unified Highlighter.
 +
-The Original Highlighter will normally analyze stored text on the fly in order to highlight. It will use full term vectors if available.
+The Original Highlighter will normally analyze stored text on the fly in order to highlight.
+It will use full term vectors if available.
 If the text isn't "stored" but is in doc values (`docValues="true"`), this highlighter can work with it.
 +
-Where this highlighter falls short is performance; it's often twice as slow as the Unified Highlighter. And despite being the most customizable, it doesn't have a BreakIterator based fragmenter (all the others do), which could pose a challenge for some languages.
+Where this highlighter falls short is performance; it's often twice as slow as the Unified Highlighter.
+And despite being the most customizable, it doesn't have a BreakIterator based fragmenter (all the others do), which could pose a challenge for some languages.
 
 
 <<The FastVector Highlighter,FastVector Highlighter>>:: (`hl.method=fastVector`)
 +
-The FastVector Highlighter _requires_ full term vector options (`termVectors`, `termPositions`, and `termOffsets`) on the field, and is optimized with that in mind. It is nearly as configurable as the Original Highlighter with some variability.
+The FastVector Highlighter _requires_ full term vector options (`termVectors`, `termPositions`, and `termOffsets`) on the field, and is optimized with that in mind.
+It is nearly as configurable as the Original Highlighter with some variability.
 +
 This highlighter notably supports multi-colored highlighting such that different query words can be denoted in the fragment with different marking, usually expressed as an HTML tag with a unique color.
 +
 This highlighter's query-representation is less advanced than the Original or Unified Highlighters: for example it will not work well with the `surround` parser, and there are multiple reported bugs pertaining to queries with stop-words.
 
-Both the FastVector and Original Highlighters can be used in conjunction in a search request to highlight some fields with one and some the other. In contrast, the Unified Highlighter can only be chosen exclusively.
+Both the FastVector and Original Highlighters can be used in conjunction in a search request to highlight some fields with one and some the other.
+In contrast, the Unified Highlighter can only be chosen exclusively.
 
 
-The Unified Highlighter is exclusively configured via search parameters. In contrast, some settings for the Original and FastVector Highlighters are set in `solrconfig.xml`. There's a robust example of the latter in the "```techproducts```" configset.
+The Unified Highlighter is exclusively configured via search parameters.
+In contrast, some settings for the Original and FastVector Highlighters are set in `solrconfig.xml`.
+There's a robust example of the latter in the "```techproducts```" configset.
 
 In addition to further information below, more information can be found in the {solr-javadocs}/core/org/apache/solr/highlight/package-summary.html[Solr javadocs].
 
 === Schema Options and Performance Considerations
 
-Fundamental to the internals of highlighting are detecting the _offsets_ of the individual words that match the query. Some of the highlighters can run the stored text through the analysis chain defined in the schema, some can look them up from _postings_, and some can look them up from _term vectors._ These choices have different trade-offs:
+Fundamental to the internals of highlighting is detecting the _offsets_ of the individual words that match the query.
+Some of the highlighters can run the stored text through the analysis chain defined in the schema, some can look them up from _postings_, and some can look them up from _term vectors._
+These choices have different trade-offs (summarized briefly after this list):
 
-* *Analysis*: Supported by the Unified and Original Highlighters. If you don't go out of your way to configure the other options below, the highlighter will analyze the stored text on the fly (during highlighting) to calculate offsets.
+* *Analysis*: Supported by the Unified and Original Highlighters.
+If you don't go out of your way to configure the other options below, the highlighter will analyze the stored text on the fly (during highlighting) to calculate offsets.
 +
 The benefit of this approach is that your index won't grow larger with any extra data that isn't strictly necessary for highlighting.
 +
 The down side is that highlighting speed is roughly linear with the amount of text to process, with a large factor being the complexity of your analysis chain.
 +
-For "short" text, this is a good choice. Or maybe it's not short but you're prioritizing a smaller index and indexing speed over highlighting performance.
-* *Postings*: Supported by the Unified Highlighter. Set `storeOffsetsWithPositions` to `true`. This adds a moderate amount of extra data to the index but it speeds up highlighting tremendously, especially compared to analysis with longer text fields.
+For "short" text, this is a good choice.
+Or maybe it's not short but you're prioritizing a smaller index and indexing speed over highlighting performance.
+* *Postings*: Supported by the Unified Highlighter.
+Set `storeOffsetsWithPositions` to `true`.
+This adds a moderate amount of extra data to the index but it speeds up highlighting tremendously, especially compared to analysis with longer text fields.
 +
 However, wildcard queries will fall back to analysis unless "light" term vectors are added.
 
-** *with Term Vectors (light)*: Supported only by the Unified Highlighter. To enable this mode set `termVectors` to `true` but no other term vector related options on the field being highlighted.
+** *with Term Vectors (light)*: Supported only by the Unified Highlighter.
+To enable this mode set `termVectors` to `true` but no other term vector related options on the field being highlighted.
 +
-This adds even more data to the index than just `storeOffsetsWithPositions` but not as much as enabling all the extra term vector options. Term Vectors are only accessed by the highlighter when a wildcard query is used and will prevent a fall back to analysis of the stored text.
+This adds even more data to the index than just `storeOffsetsWithPositions` but not as much as enabling all the extra term vector options.
+Term Vectors are only accessed by the highlighter when a wildcard query is used and will prevent a fall back to analysis of the stored text.
 +
 This is definitely the fastest option for highlighting wildcard queries on large text fields.
-* *Term Vectors (full)*: Supported by the Unified, FastVector, and Original Highlighters. Set `termVectors`, `termPositions`, and `termOffsets` to `true`, and potentially `termPayloads` for advanced use cases.
+* *Term Vectors (full)*: Supported by the Unified, FastVector, and Original Highlighters.
+Set `termVectors`, `termPositions`, and `termOffsets` to `true`, and potentially `termPayloads` for advanced use cases.
 +
-This adds substantial weight to the index – similar in size to the compressed stored text. If you are using the Unified Highlighter then this is not a recommended configuration since it's slower and heavier than postings with light term vectors. However, this could make sense if full term vectors are already needed for another use-case.
+This adds substantial weight to the index – similar in size to the compressed stored text.
+If you are using the Unified Highlighter then this is not a recommended configuration since it's slower and heavier than postings with light term vectors.
+However, this could make sense if full term vectors are already needed for another use-case.
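+
+The schema options behind each choice can be summarized as follows; this is only a recap of the trade-offs described in this list, expressed as field attributes:
+
+[source,text]
+----
+Analysis (default)            : no extra attributes; offsets are computed from the stored text at query time
+Postings                      : storeOffsetsWithPositions="true"
+Postings + light term vectors : storeOffsetsWithPositions="true" termVectors="true"
+Full term vectors             : termVectors="true" termPositions="true" termOffsets="true"
+----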
 
 == The Unified Highlighter
 
 The Unified Highlighter supports the following parameters in addition to the ones listed earlier:
 
 `hl.offsetSource`::
-By default, the Unified Highlighter will usually pick the right offset source (see above). However it may be ambiguous such as during a migration from one offset source to another that hasn't completed.
+By default, the Unified Highlighter will usually pick the right offset source (see above).
+However, it may be ambiguous, such as during a migration from one offset source to another that hasn't completed.
 +
 The offset source can be explicitly configured to one of: `ANALYSIS`, `POSTINGS`, `POSTINGS_WITH_TERM_VECTORS`, or `TERM_VECTORS`.
 
@@ -240,19 +288,25 @@ When `false`, it's an optimal target -- the highlighter will _on average_ produc
 A `false` setting is slower, particularly when there's lots of text and `hl.bs.type=SENTENCE`.
 
 `hl.tag.ellipsis`::
-By default, each snippet is returned as a separate value (as is done with the other highlighters). Set this parameter to instead return one string with this text as the delimiter. _Note: this is likely to be removed in the future._
+By default, each snippet is returned as a separate value (as is done with the other highlighters).
+Set this parameter to instead return one string with this text as the delimiter.
+_Note: this is likely to be removed in the future._
 
 `hl.defaultSummary`::
-If `true`, use the leading portion of the text as a snippet if a proper highlighted snippet can't otherwise be generated. The default is `false`.
+If `true`, use the leading portion of the text as a snippet if a proper highlighted snippet can't otherwise be generated.
+The default is `false`.
 
 `hl.score.k1`::
-Specifies BM25 term frequency normalization parameter 'k1'. For example, it can be set to `0` to rank passages solely based on the number of query terms that match. The default is `1.2`.
+Specifies BM25 term frequency normalization parameter 'k1'.
+For example, it can be set to `0` to rank passages solely based on the number of query terms that match.
+The default is `1.2`.
 
 `hl.score.b`::
+Specifies BM25 length normalization parameter 'b'.
+For example, it can be set to `0` to ignore the length of passages entirely when ranking.
+Specifies BM25 length normalization parameter 'b'. For example, it can be set to "0" to ignore the length of passages entirely when ranking.
+The default is `0.75`.
 
 `hl.score.pivot`::
-Specifies BM25 average passage length in characters. The default is `87`.
+Specifies BM25 average passage length in characters.
+The default is `87`.
 
 `hl.bs.language`::
 Specifies the breakiterator language for dividing the document into passages.
@@ -264,14 +318,18 @@ Specifies the breakiterator country for dividing the document into passages.
 Specifies the breakiterator variant for dividing the document into passages.
 
 `hl.bs.type`::
-Specifies the breakiterator type for dividing the document into passages. Can be `SEPARATOR`, `SENTENCE`, `WORD`*, `CHARACTER`, `LINE`, or `WHOLE`. `SEPARATOR` is special value that splits text on a user-provided character in `hl.bs.separator`.
+Specifies the breakiterator type for dividing the document into passages.
+Can be `SEPARATOR`, `SENTENCE`, `WORD`*, `CHARACTER`, `LINE`, or `WHOLE`.
+`SEPARATOR` is a special value that splits text on a user-provided character in `hl.bs.separator`.
 +
 The default is `SENTENCE`.
 
 `hl.bs.separator`::
-Indicates which character to break the text on. Use only if you have defined `hl.bs.type=SEPARATOR`.
+Indicates which character to break the text on.
+Use only if you have defined `hl.bs.type=SEPARATOR`.
 +
-This is useful when the text has already been manipulated in advance to have a special delineation character at desired highlight passage boundaries. This character will still appear in the text as the last character of a passage.
+This is useful when the text has already been manipulated in advance to have a special delineation character at desired highlight passage boundaries.
+This character will still appear in the text as the last character of a passage.
 
 `hl.weightMatches`::
 Tells the UH to use Lucene's new "Weight Matches" API instead of doing SpanQuery conversion.
@@ -286,10 +344,13 @@ However if either `hl.usePhraseHighlighter` or `hl.multiTermQuery` are set to fa
 The Original Highlighter supports the following parameters in addition to the ones listed earlier:
 
 `hl.mergeContiguous`::
-Instructs Solr to collapse contiguous fragments into a single fragment. A value of `true` indicates contiguous fragments will be collapsed into single fragment. The default value, `false`, is also the backward-compatible setting.
+Instructs Solr to collapse contiguous fragments into a single fragment.
+A value of `true` indicates contiguous fragments will be collapsed into a single fragment.
+The default value, `false`, is also the backward-compatible setting.
 
 `hl.maxMultiValuedToExamine`::
-Specifies the maximum number of entries in a multi-valued field to examine before stopping. This can potentially return zero results if the limit is reached before any matches are found.
+Specifies the maximum number of entries in a multi-valued field to examine before stopping.
+This can potentially return zero results if the limit is reached before any matches are found.
 +
 If used with the `maxMultiValuedToMatch`, whichever limit is reached first will determine when to stop looking.
 +
@@ -306,54 +367,67 @@ The default is `Integer.MAX_VALUE`.
 Specifies a field to be used as a backup default summary if Solr cannot generate a snippet (i.e., because no terms match).
 
 `hl.maxAlternateFieldLength`::
-Specifies the maximum number of characters of the field to return. Any value less than or equal to `0` means the field's length is unlimited (the default behavior).
+Specifies the maximum number of characters of the field to return.
+Any value less than or equal to `0` means the field's length is unlimited (the default behavior).
 +
 This parameter is only used in conjunction with the `hl.alternateField` parameter.
 
 `hl.highlightAlternate`::
-If set to `true`, the default, and `hl.alternateFieldName` is active, Solr will show the entire alternate field, with highlighting of occurrences. If `hl.maxAlternateFieldLength=N` is used, Solr returns max `N` characters surrounding the best matching fragment.
+If set to `true`, the default, and `hl.alternateField` is active, Solr will show the entire alternate field, with highlighting of occurrences.
+If `hl.maxAlternateFieldLength=N` is used, Solr returns max `N` characters surrounding the best matching fragment.
 +
 If set to `false`, or if there is no match in the alternate field either, the alternate field will be shown without highlighting.
 
 `hl.formatter`::
-Selects a formatter for the highlighted output. Currently the only legal value is `simple`, which surrounds a highlighted term with a customizable pre- and post-text snippet.
+Selects a formatter for the highlighted output.
+Currently the only legal value is `simple`, which surrounds a highlighted term with a customizable pre- and post-text snippet.
 
 `hl.simple.pre`, `hl.simple.post`::
-Specifies the text that should appear before (`hl.simple.pre`) and after (`hl.simple.post`) a highlighted term, when using the `simple` formatter. The default is `<em>` and `</em>`.
+Specifies the text that should appear before (`hl.simple.pre`) and after (`hl.simple.post`) a highlighted term, when using the `simple` formatter.
+The default is `<em>` and `</em>`.
 
 `hl.fragmenter`::
-Specifies a text snippet generator for highlighted text. The standard (default) fragmenter is `gap`, which creates fixed-sized fragments with gaps for multi-valued fields.
+Specifies a text snippet generator for highlighted text.
+The standard (default) fragmenter is `gap`, which creates fixed-sized fragments with gaps for multi-valued fields.
 +
 Another option is `regex`, which tries to create fragments that resemble a specified regular expression.
 
 `hl.regex.slop`::
 When using the regex fragmenter (`hl.fragmenter=regex`), this parameter defines the factor by which the fragmenter can stray from the ideal fragment size (given by `hl.fragsize`) to accommodate a regular expression.
 +
-For instance, a slop of `0.2` with `hl.fragsize=100` should yield fragments between 80 and 120 characters in length. It is usually good to provide a slightly smaller `hl.fragsize` value when using the regex fragmenter.
+For instance, a slop of `0.2` with `hl.fragsize=100` should yield fragments between 80 and 120 characters in length.
+It is usually good to provide a slightly smaller `hl.fragsize` value when using the regex fragmenter.
 +
 The default is `0.6`.
 
 `hl.regex.pattern`::
-Specifies the regular expression for fragmenting. This could be used to extract sentences.
+Specifies the regular expression for fragmenting.
+This could be used to extract sentences.
 
 `hl.regex.maxAnalyzedChars`::
-Instructs Solr to analyze only this many characters from a field when using the regex fragmenter (after which, the fragmenter produces fixed-sized fragments). The default is `10000`.
+Instructs Solr to analyze only this many characters from a field when using the regex fragmenter (after which, the fragmenter produces fixed-sized fragments).
+The default is `10000`.
 +
 Note, applying a complicated regex to a huge field is computationally expensive.
 
 `hl.preserveMulti`::
-If `true`, multi-valued fields will return all values in the order they were saved in the index. If `false`, the default, only values that match the highlight request will be returned.
+If `true`, multi-valued fields will return all values in the order they were saved in the index.
+If `false`, the default, only values that match the highlight request will be returned.
 
 `hl.payloads`::
 When `hl.usePhraseHighlighter` is `true` and the indexed field has payloads but not term vectors (generally quite rare), the index's payloads will be read into the highlighter's memory index along with the postings.
 +
 If this may happen and you know you don't need them for highlighting (i.e., your queries don't filter by payload) then you can save a little memory by setting this to false.
 
-The Original Highlighter has a plugin architecture that enables new functionality to be registered in `solrconfig.xml`. The "```techproducts```" configset shows most of these settings explicitly. You can use it as a guide to provide your own components to include a `SolrFormatter`, `SolrEncoder`, and `SolrFragmenter.`
+The Original Highlighter has a plugin architecture that enables new functionality to be registered in `solrconfig.xml`.
+The "```techproducts```" configset shows most of these settings explicitly.
+You can use it as a guide to provide your own components, including a `SolrFormatter`, `SolrEncoder`, and `SolrFragmenter`.
 
 == The FastVector Highlighter
 
-The FastVector Highlighter (FVH) can be used in conjunction with the Original Highlighter if not all fields should be highlighted with the FVH. In such a mode, set `hl.method=original` and `f.yourTermVecField.hl.method=fastVector` for all fields that should use the FVH. One annoyance to keep in mind is that the Original Highlighter uses `hl.simple.pre` whereas the FVH (and other highlighters) use `hl.tag.pre`.
+The FastVector Highlighter (FVH) can be used in conjunction with the Original Highlighter if not all fields should be highlighted with the FVH.
+In such a mode, set `hl.method=original` and `f.yourTermVecField.hl.method=fastVector` for all fields that should use the FVH.
+One annoyance to keep in mind is that the Original Highlighter uses `hl.simple.pre` whereas the FVH (and other highlighters) use `hl.tag.pre`.
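+
+A request mixing the two might use parameters like the following (shown one per line for readability; the field names are placeholders, and both pre-tag parameters are set because of the annoyance just described):
+
+[source,text]
+----
+hl=true
+hl.method=original
+hl.fl=title_txt,yourTermVecField
+f.yourTermVecField.hl.method=fastVector
+hl.simple.pre=<b>
+hl.tag.pre=<b>
+----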
 
 In addition to the initial listed parameters, the following parameters documented for the Original Highlighter above are also supported by the FVH:
 
@@ -364,14 +438,19 @@ In addition to the initial listed parameters, the following parameters documente
 And here are additional parameters supported by the FVH:
 
 `hl.fragListBuilder`::
-The snippet fragmenting algorithm. The `weighted` fragListBuilder uses IDF-weights to order fragments. This fragListBuilder is the default.
+The snippet fragmenting algorithm.
+The `weighted` fragListBuilder uses IDF-weights to order fragments.
+This fragListBuilder is the default.
 +
-Other options are `single`, which returns the entire field contents as one snippet, or `simple`. You can select a fragListBuilder with this parameter, or modify an existing implementation in `solrconfig.xml` to be the default by adding "default=true".
+Other options are `single`, which returns the entire field contents as one snippet, or `simple`.
+You can select a fragListBuilder with this parameter, or modify an existing implementation in `solrconfig.xml` to be the default by adding "default=true".
 
 `hl.fragmentsBuilder`::
 The fragments builder is responsible for formatting the fragments, which uses `<em>` and `</em>` markup by default (if `hl.tag.pre` and `hl.tag.post` are not defined).
 +
-Another pre-configured choice is `colored`, which is an example of how to use the fragments builder to insert HTML into the snippets for colored highlights if you choose. You can also implement your own if you'd like. You can select a fragments builder with this parameter, or modify an existing implementation in `solrconfig.xml` to be the default by adding "default=true".
+Another pre-configured choice is `colored`, which is an example of how to use the fragments builder to insert HTML into the snippets for colored highlights if you choose.
+You can also implement your own if you'd like.
+You can select a fragments builder with this parameter, or modify an existing implementation in `solrconfig.xml` to be the default by adding "default=true".
 
 `hl.boundaryScanner`::
 See <<Using Boundary Scanners with the FastVector Highlighter>> below.
@@ -380,20 +459,25 @@ See <<Using Boundary Scanners with the FastVector Highlighter>> below.
 See <<Using Boundary Scanners with the FastVector Highlighter>> below.
 
 `hl.phraseLimit`::
-The maximum number of phrases to analyze when searching for the highest-scoring phrase. The default is `5000`.
+The maximum number of phrases to analyze when searching for the highest-scoring phrase.
+The default is `5000`.
 
 `hl.multiValuedSeparatorChar`::
-Text to use to separate one value from the next for a multi-valued field. The default is " " (a space).
+Text to use to separate one value from the next for a multi-valued field.
+The default is " " (a space).
 
 === Using Boundary Scanners with the FastVector Highlighter
 
-The FastVector Highlighter will occasionally truncate highlighted words. To prevent this, implement a boundary scanner in `solrconfig.xml`, then use the `hl.boundaryScanner` parameter to specify the boundary scanner for highlighting.
+The FastVector Highlighter will occasionally truncate highlighted words.
+To prevent this, implement a boundary scanner in `solrconfig.xml`, then use the `hl.boundaryScanner` parameter to specify the boundary scanner for highlighting.
 
 Solr supports two boundary scanners: `breakIterator` and `simple`.
 
 ==== The breakIterator Boundary Scanner
 
-The `breakIterator` boundary scanner offers excellent performance right out of the box by taking locale and boundary type into account. In most cases you will want to use the `breakIterator` boundary scanner. To implement the `breakIterator` boundary scanner, add this code to the `highlighting` section of your `solrconfig.xml` file, adjusting the type, language, and country values as appropriate to your application:
+The `breakIterator` boundary scanner offers excellent performance right out of the box by taking locale and boundary type into account.
+In most cases you will want to use the `breakIterator` boundary scanner.
+To implement the `breakIterator` boundary scanner, add this code to the `highlighting` section of your `solrconfig.xml` file, adjusting the type, language, and country values as appropriate to your application:
 
 [source,xml]
 ----
@@ -410,7 +494,8 @@ Possible values for the `hl.bs.type` parameter are WORD, LINE, SENTENCE, and CHA
 
 ==== The simple Boundary Scanner
 
-The `simple` boundary scanner scans term boundaries for a specified maximum character value (`hl.bs.maxScan`) and for common delimiters such as punctuation marks (`hl.bs.chars`). To implement the `simple` boundary scanner, add this code to the `highlighting` section of your `solrconfig.xml` file, adjusting the values as appropriate to your application:
+The `simple` boundary scanner scans term boundaries for a specified maximum character value (`hl.bs.maxScan`) and for common delimiters such as punctuation marks (`hl.bs.chars`).
+To implement the `simple` boundary scanner, add this code to the `highlighting` section of your `solrconfig.xml` file, adjusting the values as appropriate to your application:
 
 [source,xml]
 ----
diff --git a/solr/solr-ref-guide/src/implicit-requesthandlers.adoc b/solr/solr-ref-guide/src/implicit-requesthandlers.adoc
index 0c5c9d0..f53ef8c 100644
--- a/solr/solr-ref-guide/src/implicit-requesthandlers.adoc
+++ b/solr/solr-ref-guide/src/implicit-requesthandlers.adoc
@@ -29,7 +29,8 @@ NOTE: All endpoint paths listed below should be placed after Solr's host and por
 Many of these handlers are used throughout the Admin UI to show information about Solr.
 
 [horizontal]
-File:: Return content of files in `${solr.home}/conf/`. This handler must have a collection name in the path to the endpoint.
+File:: Return content of files in `${solr.home}/conf/`.
+This handler must have a collection name in the path to the endpoint.
 +
 [cols="3*.",frame=none,grid=cols,options="header"]
 |===
@@ -150,7 +151,8 @@ Document Analysis:: Return a breakdown of the analysis process of the given docu
 |`solr/<collection>/analysis/document` |{solr-javadocs}/core/org/apache/solr/handler/DocumentAnalysisRequestHandler.html[DocumentAnalysisRequestHandler] |`_ANALYSIS_DOCUMENT`
 |===
 
-Field Analysis:: Return index- and query-time analysis over the given field(s)/field type(s). This handler drives the <<analysis-screen.adoc#,Analysis screen>> in Solr's Admin UI.
+Field Analysis:: Return index- and query-time analysis over the given field(s)/field type(s).
+This handler drives the <<analysis-screen.adoc#,Analysis screen>> in Solr's Admin UI.
 +
 [cols="3*.",frame=none,grid=cols,options="header"]
 |===
diff --git a/solr/solr-ref-guide/src/index-location-format.adoc b/solr/solr-ref-guide/src/index-location-format.adoc
index 4370356..7e0d6cd 100644
--- a/solr/solr-ref-guide/src/index-location-format.adoc
+++ b/solr/solr-ref-guide/src/index-location-format.adoc
@@ -20,7 +20,10 @@ Where and how Solr stores its indexes are configurable options.
 
 == Specifying a Location for Index Data with the dataDir Parameter
 
-By default, Solr stores its index data in a directory called `/data` under the core's instance directory (`instanceDir`). If you would like to specify a different directory for storing index data, you can configure the `dataDir` in the `core.properties` file for the core, or use the `<dataDir>` parameter in the `solrconfig.xml` file. You can specify another directory either with an absolute path or a pathname relative to the instanceDir of the SolrCore. For example:
+By default, Solr stores its index data in a directory called `/data` under the core's instance directory (`instanceDir`).
+If you would like to specify a different directory for storing index data, you can configure the `dataDir` in the `core.properties` file for the core, or use the `<dataDir>` parameter in the `solrconfig.xml` file.
+You can specify another directory either with an absolute path or a pathname relative to the instanceDir of the SolrCore.
+For example:
 
 [source,xml]
 ----
@@ -36,7 +39,8 @@ element `<solrDataHome>` then the location of data directory will be `<SOLR_DATA
 
 == Specifying the DirectoryFactory For Your Index
 
-The default {solr-javadocs}/core/org/apache/solr/core/NRTCachingDirectoryFactory.html[`solr.NRTCachingDirectoryFactory`] is filesystem based, and tries to pick the best implementation for the current JVM and platform. You can force a particular implementation and/or configuration options by specifying {solr-javadocs}/core/org/apache/solr/core/MMapDirectoryFactory.html[`solr.MMapDirectoryFactory`] or {solr-javadocs}/core/org/apache/solr/core/NIOFSDirectoryFactory.html[`solr.NIOFSDirectory [...]
+The default {solr-javadocs}/core/org/apache/solr/core/NRTCachingDirectoryFactory.html[`solr.NRTCachingDirectoryFactory`] is filesystem based, and tries to pick the best implementation for the current JVM and platform.
+You can force a particular implementation and/or configuration options by specifying {solr-javadocs}/core/org/apache/solr/core/MMapDirectoryFactory.html[`solr.MMapDirectoryFactory`] or {solr-javadocs}/core/org/apache/solr/core/NIOFSDirectoryFactory.html[`solr.NIOFSDirectoryFactory`].
 
 [source,xml]
 ----
@@ -46,7 +50,8 @@ The default {solr-javadocs}/core/org/apache/solr/core/NRTCachingDirectoryFactory
 </directoryFactory>
 ----
 
-The {solr-javadocs}/core/org/apache/solr/core/RAMDirectoryFactory.html[`solr.RAMDirectoryFactory`] is memory based, not persistent, and does not work with replication. Use this DirectoryFactory to store your index in RAM.
+The {solr-javadocs}/core/org/apache/solr/core/RAMDirectoryFactory.html[`solr.RAMDirectoryFactory`] is memory based, not persistent, and does not work with replication.
+Use this DirectoryFactory to store your index in RAM.
 
 [source,xml]
 ----
@@ -55,5 +60,6 @@ The {solr-javadocs}/core/org/apache/solr/core/RAMDirectoryFactory.html[`solr.RAM
 
 [NOTE]
 ====
-If you are using Hadoop and would like to store your indexes in HDFS, you should use the {solr-javadocs}/core/org/apache/solr/core/HdfsDirectoryFactory.html[`solr.HdfsDirectoryFactory`] instead of either of the above implementations. For more details, see the section <<solr-on-hdfs.adoc#,Solr on HDFS>>.
+If you are using Hadoop and would like to store your indexes in HDFS, you should use the {solr-javadocs}/core/org/apache/solr/core/HdfsDirectoryFactory.html[`solr.HdfsDirectoryFactory`] instead of either of the above implementations.
+For more details, see the section <<solr-on-hdfs.adoc#,Solr on HDFS>>.
 ====
diff --git a/solr/solr-ref-guide/src/index-segments-merging.adoc b/solr/solr-ref-guide/src/index-segments-merging.adoc
index 7ecb000..98a9adb 100644
--- a/solr/solr-ref-guide/src/index-segments-merging.adoc
+++ b/solr/solr-ref-guide/src/index-segments-merging.adoc
@@ -147,7 +147,7 @@ There are two parameters that can can be adjusted when using the default TieredM
 A value of `0.0` will make expungeDeletes behave essentially identically to `optimize`.
 
 `deletesPctAllowed`::
-(default `33.0`). During normal segment merging, a best effort is made to insure that the total percentage of deleted documents in the index is below this threshold.
+(default `33.0`) During normal segment merging, a best effort is made to ensure that the total percentage of deleted documents in the index is below this threshold.
 Valid settings are between 20% and 50%.
 33% was chosen as the default because as this setting approaches 20%, considerable load is added to the system.
 
@@ -183,7 +183,8 @@ If a merge is necessary yet we already have this many threads running, the index
 Note that Solr will only run the smallest `maxThreadCount` merges at a time.
 
 `maxThreadCount`::
-The maximum number of simultaneous merge threads that should be running at once. This must be less than `maxMergeCount`.
+The maximum number of simultaneous merge threads that should be running at once.
+This must be less than `maxMergeCount`.
 
 `ioThrottle`::
 A Boolean value (`true` or `false`) to explicitly control I/O throttling.
@@ -293,7 +294,8 @@ The default is `1000`, expressed in milliseconds.
 
 == Other Indexing Settings
 
-There are a few other parameters that may be important to configure for your implementation. These settings affect how or when updates are made to an index.
+There are a few other parameters that may be important to configure for your implementation.
+These settings affect how or when updates are made to an index.
 
 === deletionPolicy
 
diff --git a/solr/solr-ref-guide/src/index.adoc b/solr/solr-ref-guide/src/index.adoc
index 2805c84..010cdda 100644
--- a/solr/solr-ref-guide/src/index.adoc
+++ b/solr/solr-ref-guide/src/index.adoc
@@ -33,7 +33,8 @@
 [.lead-homepage]
 Welcome to Apache Solr(TM), the open source solution for search and analytics.
 
-Solr is the fast open source search platform built on Apache Lucene(TM) that provides scalable indexing and search, as well as faceting, hit highlighting and advanced analysis/tokenization capabilities. Solr and Lucene are managed by the http://www.apache.org/[Apache Software Foundation].
+Solr is the fast open source search platform built on Apache Lucene(TM) that provides scalable indexing and search, as well as faceting, hit highlighting and advanced analysis/tokenization capabilities.
+Solr and Lucene are managed by the http://www.apache.org/[Apache Software Foundation].
 
 This Reference Guide is the official Solr documentation, written and published by Lucene/Solr committers.
 ****
diff --git a/solr/solr-ref-guide/src/indexing-data-operations.adoc b/solr/solr-ref-guide/src/indexing-data-operations.adoc
index 6245d11..e33fe91 100644
--- a/solr/solr-ref-guide/src/indexing-data-operations.adoc
+++ b/solr/solr-ref-guide/src/indexing-data-operations.adoc
@@ -27,7 +27,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-This section describes how Solr adds data to its index. It covers the following topics:
+This section describes how Solr adds data to its index.
+It covers the following topics:
 
 ****
 // This tags the below list so it can be used in the parent page section list
@@ -52,4 +53,5 @@ This section describes how Solr adds data to its index. It covers the following
 
 == Indexing Using Client APIs
 
-Using client APIs, such as <<solrj.adoc#,SolrJ>>, from your applications is an important option for updating Solr indexes. See the <<client-apis.adoc#,Client APIs>> section for more information.
+Using client APIs, such as <<solrj.adoc#,SolrJ>>, from your applications is an important option for updating Solr indexes.
+See the <<client-apis.adoc#,Client APIs>> section for more information.
diff --git a/solr/solr-ref-guide/src/indexing-nested-documents.adoc b/solr/solr-ref-guide/src/indexing-nested-documents.adoc
index 21f0293..3003018 100644
--- a/solr/solr-ref-guide/src/indexing-nested-documents.adoc
+++ b/solr/solr-ref-guide/src/indexing-nested-documents.adoc
@@ -423,17 +423,13 @@ include::{example-source-dir}IndexingNestedDocuments.java[tag=anon-kids]
 --
 
 
-This simplified approach was common in older versions of Solr, and can still be used with "Root-Only" schemas that do not contain any other nested related fields apart from `\_root_`.  (Many schemas in existence are this way simply because default configsets are this way, even if the application isn't using nested documents.)
+This simplified approach was common in older versions of Solr, and can still be used with "Root-Only" schemas that do not contain any other nested related fields apart from `\_root_`.
+Many schemas in existence are this way simply because default configsets are this way, even if the application isn't using nested documents.
 
 This approach should *NOT* be used when schemas include a `\_nest_path_` field, as the existence of that field triggers assumptions and changes in behavior in various query time functionality, such as the <<searching-nested-documents.adoc#child-doc-transformer,[child]>>, that will not work when nested documents do not have any intrinsic "nested path" information.
 
-The results of indexing anonymous nested children with a "Root-Only" schema are similar to what
-happens if you attempt to index "pseudo field" nested documents using a "Root-Only" schema.
-Notably: since there is no nested path information for the
-<<searching-nested-documents.adoc#child-doc-transformer,[child]>> transformer to use to reconstruct the structure of a nest
-of documents, it returns all matching children as a flat list, similar in structure to how they were originally indexed:
-
-
+The results of indexing anonymous nested children with a "Root-Only" schema are similar to what happens if you attempt to index "pseudo field" nested documents using a "Root-Only" schema.
+Notably: since there is no nested path information for the <<searching-nested-documents.adoc#child-doc-transformer,[child]>> transformer to use to reconstruct the structure of a nest of documents, it returns all matching children as a flat list, similar in structure to how they were originally indexed:
 
 [.dynamic-tabs]
 --
diff --git a/solr/solr-ref-guide/src/indexing-with-tika.adoc b/solr/solr-ref-guide/src/indexing-with-tika.adoc
index e3c9723..0086acb 100644
--- a/solr/solr-ref-guide/src/indexing-with-tika.adoc
+++ b/solr/solr-ref-guide/src/indexing-with-tika.adoc
@@ -21,9 +21,11 @@ If the documents you need to index are in a binary format, such as Word, Excel,
 Solr uses code from the Tika project to provide a framework for incorporating many different file-format parsers such as http://pdfbox.apache.org/[Apache PDFBox] and http://poi.apache.org/index.html[Apache POI] into Solr itself.
 
 Working with this framework, Solr's `ExtractingRequestHandler` uses Tika internally to support uploading binary files
-for data extraction and indexing. Downloading Tika is not required to use Solr Cell.
+for data extraction and indexing.
+Downloading Tika is not required to use Solr Cell.
 
-When this framework was under development, it was called the Solr _Content Extraction Library_, or _CEL_; from that abbreviation came this framework's name: Solr Cell. The names Solr Cell and `ExtractingRequestHandler` are used
+When this framework was under development, it was called the Solr _Content Extraction Library_, or _CEL_; from that abbreviation came this framework's name: Solr Cell.
+The names Solr Cell and `ExtractingRequestHandler` are used
 interchangeably for this feature.
 
 == Key Solr Cell Concepts
@@ -37,7 +39,8 @@ See http://tika.apache.org/{ivy-tika-version}/formats.html for the file types su
 Solr responds to Tika's SAX events to create one or more text fields from the content.
 Tika exposes document metadata as well (apart from the XHTML).
 * Tika produces metadata such as Title, Subject, and Author according to specifications such as the DublinCore.
-The metadata available is highly dependent on the file types and what they in turn contain. Some of the general metadata created is described in the section <<Metadata Created by Tika>> below.
+The metadata available is highly dependent on the file types and what they in turn contain.
+Some of the general metadata created is described in the section <<Metadata Created by Tika>> below.
 Solr Cell supplies some metadata of its own too.
 * Solr Cell concatenates text from the internal XHTML into a `content` field.
 You can configure which elements should be included/ignored, and which should map to another field.
@@ -93,18 +96,22 @@ Once Solr is started, you can use curl to send a sample PDF included with Solr v
 curl 'http://localhost:8983/solr/gettingstarted/update/extract?literal.id=doc1&commit=true' -F "myfile=@example/exampledocs/solr-word.pdf"
 ----
 
-The URL above calls the `ExtractingRequestHandler`, uploads the file `solr-word.pdf`, and assigns it the unique ID `doc1`. Here's a closer look at the components of this command:
+The URL above calls the `ExtractingRequestHandler`, uploads the file `solr-word.pdf`, and assigns it the unique ID `doc1`.
+Here's a closer look at the components of this command:
 
 * The `literal.id=doc1` parameter provides a unique ID for the document being indexed.
 Without this, the ID would be set to the absolute path to the file.
 +
 There are alternatives to this, such as mapping a metadata field to the ID, generating a new UUID, or generating an ID from a signature (hash) of the content.
 
-* The `commit=true parameter` causes Solr to perform a commit after indexing the document, making it immediately searchable. For optimum performance when loading many documents, don't call the commit command until you are done.
+* The `commit=true` parameter causes Solr to perform a commit after indexing the document, making it immediately searchable.
+For optimum performance when loading many documents, don't call the commit command until you are done.
 
-* The `-F` flag instructs curl to POST data using the Content-Type `multipart/form-data` and supports the uploading of binary files. The `@` symbol instructs curl to upload the attached file.
+* The `-F` flag instructs curl to POST data using the Content-Type `multipart/form-data` and supports the uploading of binary files.
+The `@` symbol instructs curl to upload the attached file.
 
-* The argument `myfile=@example/exampledocs/solr-word.pdf` uploads the sample file. Note this includes the path, so if you upload a different file, always be sure to include either the relative or absolute path to the file.
+* The argument `myfile=@example/exampledocs/solr-word.pdf` uploads the sample file.
+Note this includes the path, so if you upload a different file, always be sure to include either the relative or absolute path to the file.
 
 You can also use `bin/post` to do the same thing:
 
@@ -113,7 +120,8 @@ You can also use `bin/post` to do the same thing:
 bin/post -c gettingstarted example/exampledocs/solr-word.pdf -params "literal.id=doc1"
 ----
 
-Now you can execute a query and find that document with a request like `\http://localhost:8983/solr/gettingstarted/select?q=pdf`. The document will look something like this:
+Now you can execute a query and find that document with a request like `\http://localhost:8983/solr/gettingstarted/select?q=pdf`.
+The document will look something like this:
 
 image::images/indexing-with-tika/sample-pdf-query.png[float="right",width=50%,pdfwidth=60%]
 
@@ -153,7 +161,10 @@ These parameters can be set for each indexing request (as request parameters), o
 the request handler generally by defining them in `solrconfig.xml`, as described in <<Configuring the ExtractingRequestHandler in solrconfig.xml>>.
 
 `capture`::
-Captures XHTML elements with the specified name for a supplementary addition to the Solr document. This parameter can be useful for copying chunks of the XHTML into a separate field. For instance, it could be used to grab paragraphs (`<p>`) and index them into a separate field. Note that content is still also captured into the `content` field.
+Captures XHTML elements with the specified name for a supplementary addition to the Solr document.
+This parameter can be useful for copying chunks of the XHTML into a separate field.
+For instance, it could be used to grab paragraphs (`<p>`) and index them into a separate field.
+Note that content is still also captured into the `content` field.
 +
 Example: `capture=p` (in a request) or `<str name="capture">p</str>` (in `solrconfig.xml`)
 +
@@ -162,7 +173,8 @@ Output: `"p": {"This is a paragraph from my document."}`
 This parameter can also be used with the `fmap._source_field_` parameter to map content from attributes to a new field.
 
 `captureAttr`::
-Indexes attributes of the Tika XHTML elements into separate fields, named after the element. If set to `true`, when extracting from HTML, Tika can return the href attributes in `<a>` tags as fields named "`a`".
+Indexes attributes of the Tika XHTML elements into separate fields, named after the element.
+If set to `true`, when extracting from HTML, Tika can return the href attributes in `<a>` tags as fields named "`a`".
 +
 Example: `captureAttr=true`
 +
@@ -179,12 +191,17 @@ A default field to use if the `uprefix` parameter is not specified and a field c
 Example: `defaultField=\_text_`
 
 `extractOnly`::
-Default is `false`. If `true`, returns the extracted content from Tika without indexing the document. This returns the extracted XHTML as a string in the response. When viewing on a screen, it may be useful to set the `extractFormat` parameter for a response format other than XML to aid in viewing the embedded XHTML tags.
+Default is `false`.
+If `true`, returns the extracted content from Tika without indexing the document.
+This returns the extracted XHTML as a string in the response.
+When viewing on a screen, it may be useful to set the `extractFormat` parameter for a response format other than XML to aid in viewing the embedded XHTML tags.
 +
 Example: `extractOnly=true`
 
 `extractFormat`::
-The default is `xml`, but the other option is `text`. Controls the serialization format of the extract content. The `xml` format is actually XHTML, the same format that results from passing the `-x` command to the Tika command line application, while the text format is like that produced by Tika's `-t` command.
+The default is `xml`, but the other option is `text`.
+Controls the serialization format of the extracted content.
+The `xml` format is actually XHTML, the same format that results from passing the `-x` command to the Tika command line application, while the text format is like that produced by Tika's `-t` command.
 +
 This parameter is valid only if `extractOnly` is set to true.
 +
@@ -193,17 +210,20 @@ Example: `extractFormat=text`
 Output: For an example output (in XML), see https://cwiki.apache.org/confluence/display/solr/TikaExtractOnlyExampleOutput
 
 `fmap._source_field_`::
-Maps (moves) one field name to another. The `source_field` must be a field in incoming documents, and the value is the Solr field to map to.
+Maps (moves) one field name to another.
+The `source_field` must be a field in incoming documents, and the value is the Solr field to map to.
 +
 Example: `fmap.content=text` causes the data in the `content` field generated by Tika to be moved to the Solr's `text` field.
 
 `ignoreTikaException`::
-If `true`, exceptions found during processing will be skipped. Any metadata available, however, will be indexed.
+If `true`, exceptions found during processing will be skipped.
+Any metadata available, however, will be indexed.
 +
 Example: `ignoreTikaException=true`
 
 `literal._fieldname_`::
-Populates a field with the name supplied with the specified value for each document. The data can be multivalued if the field is multivalued.
+Populates a field with the name supplied with the specified value for each document.
+The data can be multivalued if the field is multivalued.
 +
 Example: `literal.doc_status=published`
 +
@@ -213,7 +233,8 @@ Output: `"doc_status": "published"`
 If `true` (the default), literal field values will override other values with the same field name.
 +
 If `false`, literal values defined with `literal._fieldname_` will be appended to data already in the fields extracted
-from Tika. When setting `literalsOverride` to `false`, the field must be multivalued.
+from Tika.
+When setting `literalsOverride` to `false`, the field must be multivalued.
 +
 Example: `literalsOverride=false`
 
@@ -225,43 +246,51 @@ Example: `lowernames=true`
 Output: Assuming input of "Content-Type", the result in documents would be a field `content_type`
 
 `multipartUploadLimitInKB`::
-Defines the size in kilobytes of documents to allow. The default is `2048` (2Mb).
+Defines the size in kilobytes of documents to allow.
+The default is `2048` (2MB).
 If you have very large documents, you should increase this or they will be rejected.
 +
 Example: `multipartUploadLimitInKB=2048000`
 
 `parseContext.config`::
 If a Tika parser being used allows parameters, you can pass them to Tika by creating a parser configuration file and
-pointing Solr to it. See the section <<Parser-Specific Properties>> for more information about how to use this parameter.
+pointing Solr to it.
+See the section <<Parser-Specific Properties>> for more information about how to use this parameter.
 +
 Example: `parseContext.config=pdf-config.xml`
 
 `passwordsFile`::
-Defines a file path and name for a file of file name to password mappings. See the section
+Defines the path and name of a file containing file name to password mappings.
+See the section
 <<Indexing Encrypted Documents>> for more information about using a password file.
 +
 Example: `passwordsFile=/path/to/passwords.txt`
 
 `resource.name`::
-Specifies the name of the file to index. This is optional, but Tika can use it as a hint for detecting a file's MIME type.
+Specifies the name of the file to index.
+This is optional, but Tika can use it as a hint for detecting a file's MIME type.
 +
 Example: `resource.name=mydoc.doc`
 
 `resource.password`::
-Defines a password to use for a password-protected PDF or OOXML file. See the section <<Indexing Encrypted Documents>>
+Defines a password to use for a password-protected PDF or OOXML file.
+See the section <<Indexing Encrypted Documents>>
 for more information about using this parameter.
 +
 Example: `resource.password=secret`
 
 `tika.config`::
-Defines a file path and name to a custom Tika configuration file. This is only required if you have customized your Tika implementation.
+Defines a file path and name to a custom Tika configuration file.
+This is only required if you have customized your Tika implementation.
 +
 Example: `tika.config=/path/to/tika.config`
 
 `uprefix`::
-Prefixes all fields _that are undefined in the schema_ with the given prefix. This is very useful when combined with dynamic field definitions.
+Prefixes all fields _that are undefined in the schema_ with the given prefix.
+This is very useful when combined with dynamic field definitions.
 +
-Example: `uprefix=ignored_` would add `ignored_` as a prefix to all unknown fields. In this case, you could additionally define a rule in the Schema to not index these fields:
+Example: `uprefix=ignored_` would add `ignored_` as a prefix to all unknown fields.
+In this case, you could additionally define a rule in the Schema to not index these fields:
 +
 `<dynamicField name="ignored_*" type="ignored" />`
 
@@ -284,7 +313,8 @@ You will need to configure your `solrconfig.xml` to find the `ExtractingRequestH
   <lib dir="${solr.install.dir:../../..}/dist/" regex="solr-cell-\d.*\.jar" />
 ----
 
-You can then configure the `ExtractingRequestHandler` in `solrconfig.xml`. The following is the default
+You can then configure the `ExtractingRequestHandler` in `solrconfig.xml`.
+The following is the default
 configuration found in Solr's `_default` configset, which you can modify as needed:
 
 [source,xml]
@@ -309,7 +339,8 @@ that parse numbers and dates and do other manipulations on the metadata fields g
 In Solr's default configsets, <<schemaless-mode.adoc#,"schemaless">> (aka data driven, or field guessing) mode is enabled, which does a variety of such processing already.
 
 If you instead explicitly define the fields for your schema, you can selectively specify the desired URPs.
-An easy way to specify this is to configure the parameter `processor` (under `defaults`) to `uuid,remove-blank,field-name-mutating,parse-boolean,parse-long,parse-double,parse-date`. For example:
+An easy way to specify this is to configure the parameter `processor` (under `defaults`) to `uuid,remove-blank,field-name-mutating,parse-boolean,parse-long,parse-double,parse-date`.
+For example:
 
 [source,xml]
 ----
@@ -324,7 +355,9 @@ An easy way to specify this is to configure the parameter `processor` (under `de
 </requestHandler>
 ----
 
-The above suggested list was taken from the list of URPs that run as a part of schemaless mode and provide much of its functionality. However, one major part of the schemaless functionality is missing from the suggested list, `add-unknown-fields-to-the-schema`, which is the part that adds fields to the schema. So you can use the other URPs without worrying about unexpected field additions.
+The above suggested list was taken from the list of URPs that run as a part of schemaless mode and provide much of its functionality.
+However, one major part of the schemaless functionality is missing from the suggested list, `add-unknown-fields-to-the-schema`, which is the part that adds fields to the schema.
+So you can use the other URPs without worrying about unexpected field additions.
 ====
 
 === Parser-Specific Properties
@@ -332,7 +365,8 @@ The above suggested list was taken from the list of URPs that run as a part of s
 Parsers used by Tika may have specific properties to govern how data is extracted.
 These can be passed through Solr for special parsing situations.
 
-For instance, when using the Tika library from a Java program, the `PDFParserConfig` class has a method `setSortByPosition(boolean)` that can extract vertically oriented text. To access that method via configuration with the `ExtractingRequestHandler`, one can add the `parseContext.config` property to `solrconfig.xml` and then set properties in Tika's `PDFParserConfig` as in the example below.
+For instance, when using the Tika library from a Java program, the `PDFParserConfig` class has a method `setSortByPosition(boolean)` that can extract vertically oriented text.
+To access that method via configuration with the `ExtractingRequestHandler`, one can add the `parseContext.config` property to `solrconfig.xml` and then set properties in Tika's `PDFParserConfig` as in the example below.
 
 [source,xml]
 ----
@@ -351,7 +385,9 @@ Consult the Tika Java API documentation for configuration parameters that can be
 
 The ExtractingRequestHandler will decrypt encrypted files and index their content if you supply a password in either `resource.password` on the request, or in a `passwordsFile` file.
 
-In the case of `passwordsFile`, the file supplied must be formatted so there is one line per rule. Each rule contains a file name regular expression, followed by "=", then the password in clear-text. Because the passwords are in clear-text, the file should have strict access restrictions.
+In the case of `passwordsFile`, the file supplied must be formatted so there is one line per rule.
+Each rule contains a file name regular expression, followed by "=", then the password in clear-text.
+Because the passwords are in clear-text, the file should have strict access restrictions.
 
 [source,plain]
 ----
@@ -367,20 +403,26 @@ For a multi-core configuration, you can specify `sharedLib='lib'` in the `<solr/
 
 === Extending the ExtractingRequestHandler
 
-If you want to supply your own `ContentHandler` for Solr to use, you can extend the `ExtractingRequestHandler` and override the `createFactory()` method. This factory is responsible for constructing the `SolrContentHandler` that interacts with Tika, and allows literals to override Tika-parsed values. Set the parameter `literalsOverride`, which normally defaults to `true`, to `false` to append Tika-parsed values to literal values.
+If you want to supply your own `ContentHandler` for Solr to use, you can extend the `ExtractingRequestHandler` and override the `createFactory()` method.
+This factory is responsible for constructing the `SolrContentHandler` that interacts with Tika, and allows literals to override Tika-parsed values.
+Set the parameter `literalsOverride`, which normally defaults to `true`, to `false` to append Tika-parsed values to literal values.
 
 ==  Solr Cell Internals
 
 === Metadata Created by Tika
 
-As mentioned before, Tika produces metadata about the document. Metadata describes different aspects of a document, such as the author's name, the number of pages, the file size, and so on. The metadata produced depends on the type of document submitted. For instance, PDFs have different metadata than Word documents do.
+As mentioned before, Tika produces metadata about the document.
+Metadata describes different aspects of a document, such as the author's name, the number of pages, the file size, and so on.
+The metadata produced depends on the type of document submitted.
+For instance, PDFs have different metadata than Word documents do.
 
 === Metadata Added by Solr
 
 In addition to the metadata added by Tika's parsers, Solr adds the following metadata:
 
 `stream_name`::
-The name of the Content Stream as uploaded to Solr. Depending on how the file is uploaded, this may or may not be set.
+The name of the Content Stream as uploaded to Solr.
+Depending on how the file is uploaded, this may or may not be set.
 
 `stream_source_info`::
 Any source info about the stream.
@@ -398,7 +440,8 @@ set for these metadata elements on your content.
 
 Here is the order in which the Solr Cell framework processes its input:
 
-.  Tika generates fields or passes them in as literals specified by `literal.<fieldname>=<value>`. If `literalsOverride=false`, literals will be appended as multi-value to the Tika-generated field.
+.  Tika generates fields or passes them in as literals specified by `literal.<fieldname>=<value>`.
+If `literalsOverride=false`, literals will be appended as multi-value to the Tika-generated field.
 .  If `lowernames=true`, Tika maps fields to lowercase.
 .  Tika applies the mapping rules specified by `fmap.__source__=__target__` parameters.
 .  If `uprefix` is specified, any unknown field names are prefixed with that value, else if `defaultField` is specified, any unknown fields are copied to the default field.
@@ -437,7 +480,8 @@ bin/post -c gettingstarted -params "literal.id=doc5&captureAttr=true&defaultFiel
 
 === Extracting Data without Indexing
 
-Solr allows you to extract data without indexing. You might want to do this if you're using Solr solely as an extraction server or if you're interested in testing Solr extraction.
+Solr allows you to extract data without indexing.
+You might want to do this if you're using Solr solely as an extraction server or if you're interested in testing Solr extraction.
 
 The example below sets the `extractOnly=true` parameter to extract data without indexing it.
 
@@ -486,6 +530,9 @@ public class SolrCellRequestDemo {
 
 This operation streams the file `my-file.pdf` into the Solr index for `my_collection`.
 
-The sample code above calls the extract command, but you can easily substitute other commands that are supported by Solr Cell. The key class to use is the `ContentStreamUpdateRequest`, which makes sure the ContentStreams are set properly. SolrJ takes care of the rest.
+The sample code above calls the extract command, but you can easily substitute other commands that are supported by Solr Cell.
+The key class to use is the `ContentStreamUpdateRequest`, which makes sure the ContentStreams are set properly.
+SolrJ takes care of the rest.
 
-Note that the `ContentStreamUpdateRequest` is not just specific to Solr Cell. You can send CSV to the CSV Update handler and to any other Request Handler that works with Content Streams for updates.
+Note that the `ContentStreamUpdateRequest` is not just specific to Solr Cell.
+You can send CSV to the CSV Update handler and to any other Request Handler that works with Content Streams for updates.
diff --git a/solr/solr-ref-guide/src/indexing-with-update-handlers.adoc b/solr/solr-ref-guide/src/indexing-with-update-handlers.adoc
index 73d88c8..f5e85a6 100644
--- a/solr/solr-ref-guide/src/indexing-with-update-handlers.adoc
+++ b/solr/solr-ref-guide/src/indexing-with-update-handlers.adoc
@@ -21,7 +21,8 @@ Update handlers are request handlers designed to add, delete and update document
 In addition to having plugins for importing rich documents <<indexing-with-tika.adoc#,Indexing with Solr Cell and Apache Tika>>, Solr natively supports indexing structured documents in XML, CSV, and JSON.
 
 The recommended way to configure and use request handlers is with path based names that map to paths in the request URL.
-However, request handlers can also be specified with the `qt` (query type) parameter if the <<requestdispatcher.adoc#,`requestDispatcher`>> is appropriately configured. It is possible to access the same handler using more than one name, which can be useful if you wish to specify different sets of default options.
+However, request handlers can also be specified with the `qt` (query type) parameter if the <<requestdispatcher.adoc#,`requestDispatcher`>> is appropriately configured.
+It is possible to access the same handler using more than one name, which can be useful if you wish to specify different sets of default options.
 
 A single unified update request handler supports XML, CSV, JSON, and javabin update requests, delegating to the appropriate `ContentStreamLoader` based on the `Content-Type` of the <<content-streams.adoc#,ContentStream>>.
 
@@ -80,9 +81,11 @@ The add command supports some optional attributes which may be specified.
 Add the document within the specified number of milliseconds.
 
 `overwrite`::
-Default is `true`. Indicates if the unique key constraints should be checked to overwrite previous versions of the same document (see below).
+Default is `true`.
+Indicates if the unique key constraints should be checked to overwrite previous versions of the same document (see below).
 
-If the document schema defines a unique key, then by default an `/update` operation to add a document will overwrite (i.e., replace) any document in the index with the same unique key. If no unique key has been defined, indexing performance is somewhat faster, as no check has to be made for an existing documents to replace.
+If the document schema defines a unique key, then by default an `/update` operation to add a document will overwrite (i.e., replace) any document in the index with the same unique key.
+If no unique key has been defined, indexing performance is somewhat faster, as no check has to be made for existing documents to replace.
 
 If you have a unique key field, but you feel confident that you can safely bypass the uniqueness check (e.g., you build your indexes in batch, and your indexing code guarantees it never adds the same document more than once) you can specify the `overwrite="false"` option when adding your documents.
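
For illustration, here is a minimal sketch of such a request; the collection name, document ID, and field values are placeholders:

[source,bash]
----
curl 'http://localhost:8983/solr/my_collection/update?commit=true' \
  -H 'Content-Type: text/xml' \
  --data-binary '<add overwrite="false"><doc><field name="id">doc1</field><field name="title">A sample title</field></doc></add>'
----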
 
@@ -90,24 +93,34 @@ If you have a unique key field, but you feel confident that you can safely bypas
 
 ==== Commit and Optimize During Updates
 
-The `<commit>` operation writes all documents loaded since the last commit to one or more segment files on the disk. Before a commit has been issued, newly indexed content is not visible to searches. The commit operation opens a new searcher, and triggers any event listeners that have been configured.
+The `<commit>` operation writes all documents loaded since the last commit to one or more segment files on the disk.
+Before a commit has been issued, newly indexed content is not visible to searches.
+The commit operation opens a new searcher, and triggers any event listeners that have been configured.
 
 Commits may be issued explicitly with a `<commit/>` message, and can also be triggered from `<autocommit>` parameters in `solrconfig.xml`.
 
-The `<optimize>` operation requests Solr to merge internal data structures. For a large index, optimization will take some time to complete, but by merging many small segment files into larger segments, search performance may improve. If you are using Solr's replication mechanism to distribute searches across many systems, be aware that after an optimize, a complete index will need to be transferred.
+The `<optimize>` operation requests Solr to merge internal data structures.
+For a large index, optimization will take some time to complete, but by merging many small segment files into larger segments, search performance may improve.
+If you are using Solr's replication mechanism to distribute searches across many systems, be aware that after an optimize, a complete index will need to be transferred.
 
-WARNING: You should only consider using optimize on static indexes, i.e., indexes that can be optimized as part of the regular update process (say once-a-day updates). Applications requiring NRT functionality should not use optimize.
+WARNING: You should only consider using optimize on static indexes, i.e., indexes that can be optimized as part of the regular update process (say once-a-day updates).
+Applications requiring NRT functionality should not use optimize.
 
 The `<commit>` and `<optimize>` elements accept these optional attributes:
 
 `waitSearcher`::
-Default is `true`. Blocks until a new searcher is opened and registered as the main query searcher, making the changes visible.
+Default is `true`.
+Blocks until a new searcher is opened and registered as the main query searcher, making the changes visible.
 
-`expungeDeletes`:: (commit only) Default is `false`. Merges segments that have more than 10% deleted docs, expunging the deleted documents in the process. Resulting segments will respect `maxMergedSegmentMB`.
+`expungeDeletes`:: (commit only) Default is `false`.
+Merges segments that have more than 10% deleted docs, expunging the deleted documents in the process.
+Resulting segments will respect `maxMergedSegmentMB`.
 +
 WARNING: expungeDeletes is "less expensive" than optimize, but the same warnings apply.
 
-`maxSegments`:: (optimize only) Default is unlimited, resulting segments respect the `maxMergedSegmentMB` setting. Makes a best effort attempt to merge the segments down to no more than this number of segments but does not guarantee that the goal will be achieved. Unless there is tangible evidence that optimizing to a small number of segments is beneficial, this parameter should be omitted and the default behavior accepted.
+`maxSegments`:: (optimize only) Default is unlimited; resulting segments respect the `maxMergedSegmentMB` setting.
+Makes a best effort attempt to merge the segments down to no more than this number of segments but does not guarantee that the goal will be achieved.
+Unless there is tangible evidence that optimizing to a small number of segments is beneficial, this parameter should be omitted and the default behavior accepted.
 
 Here are examples of `<commit>` and `<optimize>` using optional attributes:
 
@@ -123,7 +136,8 @@ Here are examples of `<commit>` and `<optimize>` using optional attributes:
 Documents can be deleted from the index in two ways.
 "Delete by ID" deletes the document with the specified ID, and can be used only if a UniqueID field has been defined in the schema.
 It doesn't work for child/nested docs.
-"Delete by Query" deletes all documents matching a specified query, although `commitWithin` is ignored for a Delete by Query. A single delete message can contain multiple delete operations.
+"Delete by Query" deletes all documents matching a specified query, although `commitWithin` is ignored for a Delete by Query.
+A single delete message can contain multiple delete operations.
 
 [source,xml]
 ----
@@ -138,13 +152,16 @@ It doesn't work for child/nested docs.
 [IMPORTANT]
 ====
 
-When using the Join query parser in a Delete By Query, you should use the `score` parameter with a value of "none" to avoid a `ClassCastException`. See the section on the <<other-parsers.adoc#,Join Query Parser>> for more details on the `score` parameter.
+When using the Join query parser in a Delete By Query, you should use the `score` parameter with a value of "none" to avoid a `ClassCastException`.
+See the section on the <<other-parsers.adoc#,Join Query Parser>> for more details on the `score` parameter.
 
 ====
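
As a sketch of the advice above (the collection and field names are hypothetical), a Delete By Query using the Join query parser with `score=none` could be sent like this:

[source,bash]
----
curl 'http://localhost:8983/solr/my_collection/update?commit=true' \
  -H 'Content-Type: text/xml' \
  --data-binary '<delete><query>{!join from=manu_id to=id score=none}discontinued:true</query></delete>'
----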
 
 ==== Rollback Operations
 
-The rollback command rolls back all add and deletes made to the index since the last commit. It neither calls any event listeners nor creates a new searcher. Its syntax is simple: `<rollback/>`.
+The rollback command rolls back all adds and deletes made to the index since the last commit.
+It neither calls any event listeners nor creates a new searcher.
+Its syntax is simple: `<rollback/>`.
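
For example, assuming a collection named `my_collection`, a rollback can be posted to the update handler like any other XML command:

[source,bash]
----
curl 'http://localhost:8983/solr/my_collection/update' \
  -H 'Content-Type: text/xml' \
  --data-binary '<rollback/>'
----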
 
 ==== Grouping Operations
 
@@ -168,7 +185,8 @@ You can post several commands in a single XML file by grouping them with the sur
 
 === Using curl to Perform Updates
 
-You can use the `curl` utility to perform any of the above commands, using its `--data-binary` option to append the XML message to the `curl` command, and generating a HTTP POST request. For example:
+You can use the `curl` utility to perform any of the above commands, using its `--data-binary` option to append the XML message to the `curl` command, and generating an HTTP POST request.
+For example:
 
 [source,bash]
 ----
@@ -201,7 +219,8 @@ This alternative `curl` command performs equivalent operations but with minimal
 curl http://localhost:8983/solr/my_collection/update -H "Content-Type: text/xml" -T "myfile.xml" -X POST
 ----
 
-Short requests can also be sent using a HTTP GET command, if enabled in <<requestdispatcher.adoc#requestparsers-element,`requestParsers`>> element of `solrconfig.xml`, URL-encoding the request, as in the following. Note the escaping of "<" and ">":
+Short requests can also be sent using an HTTP GET command, if enabled in the <<requestdispatcher.adoc#requestparsers-element,`requestParsers`>> element of `solrconfig.xml`, URL-encoding the request, as in the following.
+Note the escaping of "<" and ">":
 
 [source,bash]
 ----
@@ -230,7 +249,8 @@ Learn more about adding the `dist/solr-scripting-*.jar` file into Solr's <<libs.
 
 === tr Parameter
 
-The XSLT Update Request Handler accepts one parameter: the `tr` parameter, which identifies the XML transformation to use. The transformation must be found in the Solr `conf/xslt` directory.
+The XSLT Update Request Handler accepts one parameter: the `tr` parameter, which identifies the XML transformation to use.
+The transformation must be found in the Solr `conf/xslt` directory.
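
For instance, once the XSLT support described above is enabled, and assuming a stylesheet saved as `conf/xslt/myTransform.xsl` and an XML file `mydata.xml` to transform before indexing, the request might look like:

[source,bash]
----
curl 'http://localhost:8983/solr/my_collection/update?commit=true&tr=myTransform.xsl' \
  -H 'Content-Type: text/xml' \
  --data-binary @mydata.xml
----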
 
 
 === XSLT Configuration
@@ -249,7 +269,8 @@ The example below, from the `sample_techproducts_configs` <<config-sets.adoc#,co
 </requestHandler>
 ----
 
-A value of 5 for `xsltCacheLifetimeSeconds` is good for development, to see XSLT changes quickly. For production you probably want a much higher value.
+A value of 5 for `xsltCacheLifetimeSeconds` is good for development, to see XSLT changes quickly.
+For production you probably want a much higher value.
 
 === XSLT Update Example
 
@@ -295,7 +316,8 @@ Here is the `sample_techproducts_configs/conf/xslt/updateXml.xsl` XSL file for c
 </xsl:stylesheet>
 ----
 
-This stylesheet transforms Solr's XML search result format into Solr's Update XML syntax. One example usage would be to copy a Solr 1.3 index (which does not have CSV response writer) into a format which can be indexed into another Solr file (provided that all fields are stored):
+This stylesheet transforms Solr's XML search result format into Solr's Update XML syntax.
+One example usage would be to copy a Solr 1.3 index (which does not have a CSV response writer) into a format which can be indexed into another Solr index (provided that all fields are stored):
 
 [source,bash]
 ----
@@ -308,7 +330,8 @@ NOTE: You can see the opposite export/import cycle using the `tr` parameter in
 
 == JSON Formatted Index Updates
 
-Solr can accept JSON that conforms to a defined structure, or can accept arbitrary JSON-formatted documents. If sending arbitrarily formatted JSON, there are some additional parameters that need to be sent with the update request, described below in the section <<transforming-and-indexing-custom-json.adoc#,Transforming and Indexing Custom JSON>>.
+Solr can accept JSON that conforms to a defined structure, or can accept arbitrary JSON-formatted documents.
+If sending arbitrarily formatted JSON, there are some additional parameters that need to be sent with the update request, described below in the section <<transforming-and-indexing-custom-json.adoc#,Transforming and Indexing Custom JSON>>.
 
 === Solr-Style JSON
 
@@ -316,7 +339,8 @@ JSON formatted update requests may be sent to Solr's `/update` handler using `Co
 
 JSON formatted updates can take 3 basic forms, described in depth below:
 
-* <<Adding a Single JSON Document,A single document to add>>, expressed as a top level JSON Object. To differentiate this from a set of commands, the `json.command=false` request parameter is required.
+* <<Adding a Single JSON Document,A single document to add>>, expressed as a top level JSON Object.
+To differentiate this from a set of commands, the `json.command=false` request parameter is required.
 * <<Adding Multiple JSON Documents,A list of documents to add>>, expressed as a top level JSON Array containing a JSON Object per document.
 * <<Sending JSON Update Commands,A sequence of update commands>>, expressed as a top level JSON Object (aka: Map).
 
@@ -361,7 +385,8 @@ curl 'http://localhost:8983/solr/techproducts/update?commit=true' --data-binary
 
 ==== Sending JSON Update Commands
 
-In general, the JSON update syntax supports all of the update commands that the XML update handler supports, through a straightforward mapping. Multiple commands, adding and deleting documents, may be contained in one message:
+In general, the JSON update syntax supports all of the update commands that the XML update handler supports, through a straightforward mapping.
+Multiple commands, adding and deleting documents, may be contained in one message:
 
 [source,bash,subs="verbatim,callouts"]
 ----
@@ -400,7 +425,9 @@ curl -X POST -H 'Content-Type: application/json' 'http://localhost:8983/solr/my_
 
 As with other update handlers, parameters such as `commit`, `commitWithin`, `optimize`, and `overwrite` may be specified in the URL instead of in the body of the message.
 
-The JSON update format allows for a simple delete-by-id. The value of a `delete` can be an array which contains a list of zero or more specific document id's (not a range) to be deleted. For example, a single document:
+The JSON update format allows for a simple delete-by-id.
+The value of a `delete` can be an array which contains a list of zero or more specific document IDs (not a range) to be deleted.
+For example, a single document:
 
 [source,json]
 ----
@@ -447,7 +474,8 @@ The `/update/json` path may be useful for clients sending in JSON formatted upda
 
 === Custom JSON Documents
 
-Solr can support custom JSON. This is covered in the section <<transforming-and-indexing-custom-json.adoc#,Transforming and Indexing Custom JSON>>.
+Solr can support custom JSON.
+This is covered in the section <<transforming-and-indexing-custom-json.adoc#,Transforming and Indexing Custom JSON>>.
 
 
 == CSV Formatted Index Updates
@@ -473,75 +501,97 @@ Character used as field separator; default is ",". This parameter is global; for
 Example:  `separator=%09`
 
 `trim`::
-If `true`, remove leading and trailing whitespace from values. The default is `false`. This parameter can be either global or per-field.
+If `true`, remove leading and trailing whitespace from values.
+The default is `false`.
+This parameter can be either global or per-field.
 +
 Examples: `f.isbn.trim=true` or `trim=false`
 
 `header`::
-Set to `true` if first line of input contains field names. These will be used if the `fieldnames` parameter is absent. This parameter is global.
+Set to `true` if the first line of input contains field names.
+These will be used if the `fieldnames` parameter is absent.
+This parameter is global.
 
 `fieldnames`::
-Comma-separated list of field names to use when adding documents. This parameter is global.
+Comma-separated list of field names to use when adding documents.
+This parameter is global.
 +
 Example: `fieldnames=isbn,price,title`
 
 `literal._field_name_`::
-A literal value for a specified field name. This parameter is global.
+A literal value for a specified field name.
+This parameter is global.
 +
 Example: `literal.color=red`
 
 `skip`::
-Comma separated list of field names to skip. This parameter is global.
+Comma-separated list of field names to skip.
+This parameter is global.
 +
 Example: `skip=uninteresting,shoesize`
 
 `skipLines`::
-Number of lines to discard in the input stream before the CSV data starts, including the header, if present. Default=`0`. This parameter is global.
+Number of lines to discard in the input stream before the CSV data starts, including the header, if present.
+Default=`0`.
+This parameter is global.
 +
 Example: `skipLines=5`
 
-`encapsulator`:: The character optionally used to surround values to preserve characters such as the CSV separator or whitespace. This standard CSV format handles the encapsulator itself appearing in an encapsulated value by doubling the encapsulator.
+`encapsulator`:: The character optionally used to surround values to preserve characters such as the CSV separator or whitespace.
+This standard CSV format handles the encapsulator itself appearing in an encapsulated value by doubling the encapsulator.
 +
 This parameter is global; for per-field usage, see `split`.
 +
 Example: `encapsulator="`
 
-`escape`:: The character used for escaping CSV separators or other reserved characters. If an escape is specified, the encapsulator is not used unless also explicitly specified since most formats use either encapsulation or escaping, not both. |g |
+`escape`:: The character used for escaping CSV separators or other reserved characters.
+If an escape is specified, the encapsulator is not used unless also explicitly specified since most formats use either encapsulation or escaping, not both.
 +
 Example: `escape=\`
 
 `keepEmpty`::
-Keep and index zero length (empty) fields. The default is `false`. This parameter can be global or per-field.
+Keep and index zero length (empty) fields.
+The default is `false`.
+This parameter can be global or per-field.
 +
 Example: `f.price.keepEmpty=true`
 
-`map`:: Map one value to another. Format is value:replacement (which can be empty). This parameter can be global or per-field.
+`map`:: Map one value to another.
+Format is value:replacement (which can be empty).
+This parameter can be global or per-field.
 +
 Example: `map=left:right` or `f.subject.map=history:bunk`
 
 `split`::
-If `true`, split a field into multiple values by a separate parser. This parameter is used on a per-field basis.
+If `true`, split a field into multiple values by a separate parser.
+This parameter is used on a per-field basis.
 
 `overwrite`::
-If `true` (the default), check for and overwrite duplicate documents, based on the uniqueKey field declared in the Solr schema. If you know the documents you are indexing do not contain any duplicates then you may see a considerable speed up setting this to `false`.
+If `true` (the default), check for and overwrite duplicate documents, based on the uniqueKey field declared in the Solr schema.
+If you know the documents you are indexing do not contain any duplicates, you may see a considerable speedup by setting this to `false`.
 +
 This parameter is global.
 
 `commit`::
-Issues a commit after the data has been ingested. This parameter is global.
+Issues a commit after the data has been ingested.
+This parameter is global.
 
 `commitWithin`::
-Add the document within the specified number of milliseconds. This parameter is global.
+Add the document within the specified number of milliseconds.
+This parameter is global.
 +
 Example: `commitWithin=10000`
 
 `rowid`::
-Map the `rowid` (line number) to a field specified by the value of the parameter, for instance if your CSV doesn't have a unique key and you want to use the row id as such. This parameter is global.
+Map the `rowid` (line number) to a field specified by the value of the parameter, for instance if your CSV doesn't have a unique key and you want to use the row id as such.
+This parameter is global.
 +
 Example: `rowid=id`
 
 `rowidOffset`::
-Add the given offset (as an integer) to the `rowid` before adding it to the document. Default is `0`. This parameter is global.
+Add the given offset (as an integer) to the `rowid` before adding it to the document.
+Default is `0`.
+This parameter is global.
 +
 Example: `rowidOffset=10`
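
As a sketch that pulls several of these parameters together (the collection, file, and field names are placeholders), a tab-separated file with a multivalued `tags` column could be indexed like this:

[source,bash]
----
curl 'http://localhost:8983/solr/my_collection/update?commit=true&separator=%09&f.tags.split=true&f.tags.separator=,' \
  -H 'Content-Type: application/csv' \
  --data-binary @books.tsv
----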
 
diff --git a/solr/solr-ref-guide/src/indexupgrader-tool.adoc b/solr/solr-ref-guide/src/indexupgrader-tool.adoc
index e74ac03..b9b8f61 100644
--- a/solr/solr-ref-guide/src/indexupgrader-tool.adoc
+++ b/solr/solr-ref-guide/src/indexupgrader-tool.adoc
@@ -31,18 +31,23 @@ If you are currently using a release two or more major versions older, such as m
 The IndexUpgraderTool performs a forceMerge (optimize) down to one segment, which may be undesirable.
 ====
 
-In a Solr distribution, the Lucene files are located in `./server/solr-webapp/webapp/WEB-INF/lib`. You will need to include the `lucene-core-<version>.jar` and `lucene-backwards-codecs-<version>.jar` on the classpath when running the tool.
+In a Solr distribution, the Lucene files are located in `./server/solr-webapp/webapp/WEB-INF/lib`.
+You will need to include the `lucene-core-<version>.jar` and `lucene-backward-codecs-<version>.jar` on the classpath when running the tool.
 
 [source,bash,subs="attributes"]
 ----
 java -cp lucene-core-{solr-docs-version}.0.jar:lucene-backward-codecs-{solr-docs-version}.0.jar org.apache.lucene.index.IndexUpgrader [-delete-prior-commits] [-verbose] /path/to/index
 ----
 
-This tool keeps only the last commit in an index. For this reason, if the incoming index has more than one commit, the tool refuses to run by default. Specify `-delete-prior-commits` to override this, allowing the tool to delete all but the last commit.
+This tool keeps only the last commit in an index.
+For this reason, if the incoming index has more than one commit, the tool refuses to run by default.
+Specify `-delete-prior-commits` to override this, allowing the tool to delete all but the last commit.
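
For example, a run that removes prior commits and prints verbose output (the index path shown is only an illustration) could look like:

[source,bash,subs="attributes"]
----
java -cp lucene-core-{solr-docs-version}.0.jar:lucene-backward-codecs-{solr-docs-version}.0.jar org.apache.lucene.index.IndexUpgrader -delete-prior-commits -verbose /var/solr/data/my_core/data/index
----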
 
-Upgrading large indexes may take a long time. As a rule of thumb, the upgrade processes about 1 GB per minute.
+Upgrading large indexes may take a long time.
+As a rule of thumb, the upgrade processes about 1 GB per minute.
 
 [WARNING]
 ====
-This tool may reorder documents if the index was partially upgraded before execution (e.g., documents were added). If your application relies on monotonicity of document IDs (i.e., the order in which the documents were added to the index is preserved), do a full optimize instead.
+This tool may reorder documents if the index was partially upgraded before execution (e.g., documents were added).
+If your application relies on monotonicity of document IDs (i.e., the order in which the documents were added to the index is preserved), do a full optimize instead.
 ====
diff --git a/solr/solr-ref-guide/src/initparams.adoc b/solr/solr-ref-guide/src/initparams.adoc
index fee57c5..d214347 100644
--- a/solr/solr-ref-guide/src/initparams.adoc
+++ b/solr/solr-ref-guide/src/initparams.adoc
@@ -45,7 +45,8 @@ For example, here is one of the `<initParams>` sections defined by default in th
 This sets the default search field ("df") to be "_text_" for all of the request handlers named in the path section.
 If we later want to change the `/query` request handler to search a different field by default, we could override the `<initParams>` by defining the parameter in the `<requestHandler>` section for `/query`.
 
-The syntax and semantics are similar to that of a `<requestHandler>`. The following are the attributes:
+The syntax and semantics are similar to that of a `<requestHandler>`.
+The following are the attributes:
 
 `path`::
 A comma-separated list of paths which will use the parameters.
diff --git a/solr/solr-ref-guide/src/installing-solr.adoc b/solr/solr-ref-guide/src/installing-solr.adoc
index 09d56c2..b069502 100644
--- a/solr/solr-ref-guide/src/installing-solr.adoc
+++ b/solr/solr-ref-guide/src/installing-solr.adoc
@@ -23,19 +23,23 @@ Please be sure to review the <<system-requirements.adoc#,System Requirements>> b
 
 == Available Solr Packages
 
-Solr is available from the Solr website. Download the latest release https://solr.apache.org/downloads.html.
+Solr is available from the Solr website.
+Download the latest release from https://solr.apache.org/downloads.html.
 
 There are three separate packages:
 
 * `solr-{solr-docs-version}.0.tgz` for Linux/Unix/OSX systems
 * `solr-{solr-docs-version}.0.zip` for Microsoft Windows systems
-* `solr-{solr-docs-version}.0-src.tgz` the package Solr source code. This is useful if you want to develop on Solr without using the official Git repository.
+* `solr-{solr-docs-version}.0-src.tgz` the Solr source code package.
+This is useful if you want to develop on Solr without using the official Git repository.
 
 == Preparing for Installation
 
-When getting started with Solr, all you need to do is extract the Solr distribution archive to a directory of your choosing. This will suffice as an initial development environment, but take care not to overtax this "toy" installation before setting up your true development and production environments.
+When getting started with Solr, all you need to do is extract the Solr distribution archive to a directory of your choosing.
+This will suffice as an initial development environment, but take care not to overtax this "toy" installation before setting up your true development and production environments.
 
-When you've progressed past initial evaluation of Solr, you'll want to take care to plan your implementation. You may need to reinstall Solr on another server or make a clustered SolrCloud environment.
+When you've progressed past initial evaluation of Solr, you'll want to take care to plan your implementation.
+You may need to reinstall Solr on another server or make a clustered SolrCloud environment.
 
 When you're ready to setup Solr for a production environment, please refer to the instructions provided on the <<taking-solr-to-production.adoc#,Taking Solr to Production>> page.
 
@@ -44,10 +48,13 @@ When you're ready to setup Solr for a production environment, please refer to th
 ====
 How to size your Solr installation is a complex question that relies on a number of factors, including the number and structure of documents, how many fields you intend to store, the number of users, etc.
 
-It's highly recommended that you spend a bit of time thinking about the factors that will impact hardware sizing for your Solr implementation. A very good blog post that discusses the issues to consider is https://lucidworks.com/2012/07/23/sizing-hardware-in-the-abstract-why-we-dont-have-a-definitive-answer/[Sizing Hardware in the Abstract: Why We Don't have a Definitive Answer].
+It's highly recommended that you spend a bit of time thinking about the factors that will impact hardware sizing for your Solr implementation.
+A very good blog post that discusses the issues to consider is https://lucidworks.com/2012/07/23/sizing-hardware-in-the-abstract-why-we-dont-have-a-definitive-answer/[Sizing Hardware in the Abstract: Why We Don't have a Definitive Answer].
 ====
 
-One thing to note when planning your installation is that a hard limit exists in Lucene for the number of documents in a single index: approximately 2.14 billion documents (2,147,483,647 to be exact). In practice, it is highly unlikely that such a large number of documents would fit and perform well in a single index, and you will likely need to distribute your index across a cluster before you ever approach this number. If you know you will exceed this number of documents in total before you've even started indexing, it's best to plan your installation with <<cluster-types.adoc#solrcloud-mode,SolrCloud>> as part of your design from the start.
+One thing to note when planning your installation is that a hard limit exists in Lucene for the number of documents in a single index: approximately 2.14 billion documents (2,147,483,647 to be exact).
+In practice, it is highly unlikely that such a large number of documents would fit and perform well in a single index, and you will likely need to distribute your index across a cluster before you ever approach this number.
+If you know you will exceed this number of documents in total before you've even started indexing, it's best to plan your installation with <<cluster-types.adoc#solrcloud-mode,SolrCloud>> as part of your design from the start.
 
 == Package Installation
 
@@ -68,15 +75,20 @@ After installing Solr, you'll see the following directories and files within the
 bin/::
 This directory includes several important scripts that will make using Solr easier.
 
-solr and solr.cmd::: This is <<solr-control-script-reference.adoc#,Solr's Control Script>>, also known as `bin/solr` (*nix) / `bin/solr.cmd` (Windows). This script is the preferred tool to start and stop Solr. You can also create collections or cores, configure authentication, and work with configuration files when running in SolrCloud mode.
+solr and solr.cmd::: This is <<solr-control-script-reference.adoc#,Solr's Control Script>>, also known as `bin/solr` (*nix) / `bin/solr.cmd` (Windows).
+This script is the preferred tool to start and stop Solr.
+You can also create collections or cores, configure authentication, and work with configuration files when running in SolrCloud mode.
 
 post::: The <<post-tool.adoc#,PostTool>>, which provides a simple command line interface for POSTing content to Solr.
 
 solr.in.sh and solr.in.cmd:::
-These are property files for *nix and Windows systems, respectively. System-level properties for Java, Jetty, and Solr are configured here. Many of these settings can be overridden when using `bin/solr` / `bin/solr.cmd`, but this allows you to set all the properties in one place.
+These are property files for *nix and Windows systems, respectively.
+System-level properties for Java, Jetty, and Solr are configured here.
+Many of these settings can be overridden when using `bin/solr` / `bin/solr.cmd`, but this allows you to set all the properties in one place.
 
 install_solr_services.sh:::
-This script is used on *nix systems to install Solr as a service. It is described in more detail in the section <<taking-solr-to-production.adoc#,Taking Solr to Production>>.
+This script is used on *nix systems to install Solr as a service.
+It is described in more detail in the section <<taking-solr-to-production.adoc#,Taking Solr to Production>>.
 
 contrib/::
 Solr's `contrib` directory includes add-on plugins for specialized features of Solr.
@@ -88,36 +100,44 @@ docs/::
 The `docs` directory includes a link to online Javadocs for Solr.
 
 example/::
-The `example` directory includes several types of examples that demonstrate various Solr capabilities. See the section <<Solr Examples>> below for more details on what is in this directory.
+The `example` directory includes several types of examples that demonstrate various Solr capabilities.
+See the section <<Solr Examples>> below for more details on what is in this directory.
 
 licenses/::
 The `licenses` directory includes all of the licenses for 3rd party libraries used by Solr.
 
 server/::
-This directory is where the heart of the Solr application resides. A README in this directory provides a detailed overview, but here are some highlights:
+This directory is where the heart of the Solr application resides.
+A README in this directory provides a detailed overview, but here are some highlights:
 * Solr's Admin UI (`server/solr-webapp`)
 * Jetty libraries (`server/lib`)
-* Log files (`server/logs`) and log configurations (`server/resources`). See the section <<configuring-logging.adoc#,Configuring Logging>> for more details on how to customize Solr's default logging.
+* Log files (`server/logs`) and log configurations (`server/resources`).
+See the section <<configuring-logging.adoc#,Configuring Logging>> for more details on how to customize Solr's default logging.
 * Sample configsets (`server/solr/configsets`)
 
 == Solr Examples
 
-Solr includes a number of example documents and configurations to use when getting started. If you ran through the <<solr-tutorial.adoc#,Solr Tutorial>>, you have already interacted with some of these files.
+Solr includes a number of example documents and configurations to use when getting started.
+If you ran through the <<solr-tutorial.adoc#,Solr Tutorial>>, you have already interacted with some of these files.
 
 Here are the examples included with Solr:
 
 exampledocs::
-This is a small set of simple CSV, XML, and JSON files that can be used with `bin/post` when first getting started with Solr. For more information about using `bin/post` with these files, see <<post-tool.adoc#,Post Tool>>.
+This is a small set of simple CSV, XML, and JSON files that can be used with `bin/post` when first getting started with Solr.
+For more information about using `bin/post` with these files, see <<post-tool.adoc#,Post Tool>>.
 
 files::
-The `files` directory provides a basic search UI for documents such as Word or PDF that you may have stored locally. See the README there for details on how to use this example.
+The `files` directory provides a basic search UI for documents such as Word or PDF that you may have stored locally.
+See the README there for details on how to use this example.
 
 films::
-The `films` directory includes a robust set of data about movies in three formats: CSV, XML, and JSON. See the README there for details on how to use this dataset.
+The `films` directory includes a robust set of data about movies in three formats: CSV, XML, and JSON.
+See the README there for details on how to use this dataset.
 
 == Starting Solr
 
-Solr includes a command line interface tool called `bin/solr` (Linux/MacOS) or `bin\solr.cmd` (Windows). This tool allows you to start and stop Solr, create cores and collections, configure authentication, and check the status of your system.
+Solr includes a command line interface tool called `bin/solr` (Linux/MacOS) or `bin\solr.cmd` (Windows).
+This tool allows you to start and stop Solr, create cores and collections, configure authentication, and check the status of your system.
 
 To use it to start Solr you can simply enter:
 
@@ -141,17 +161,21 @@ TIP: All of the options for the Solr CLI are described in the section <<solr-con
 
 === Start Solr with a Specific Bundled Example
 
-Solr also provides a number of useful examples to help you learn about key features. You can launch the examples using the `-e` flag. For instance, to launch the "techproducts" example, you would do:
+Solr also provides a number of useful examples to help you learn about key features.
+You can launch the examples using the `-e` flag.
+For instance, to launch the "techproducts" example, you would do:
 
 [source,bash]
 ----
 bin/solr -e techproducts
 ----
 
-Currently, the available examples you can run are: techproducts, schemaless, and cloud. See the section <<solr-control-script-reference.adoc#running-with-example-configurations,Running with Example Configurations>> for details on each example.
+Currently, the available examples you can run are: techproducts, schemaless, and cloud.
+See the section <<solr-control-script-reference.adoc#running-with-example-configurations,Running with Example Configurations>> for details on each example.
 
 .Getting Started with SolrCloud
-NOTE: Running the `cloud` example starts Solr in <<cluster-types.adoc#solrcloud-mode,SolrCloud>> mode. For more information on starting Solr in SolrCloud mode, see the section <<tutorial-solrcloud.adoc#,Getting Started with SolrCloud>>.
+NOTE: Running the `cloud` example starts Solr in <<cluster-types.adoc#solrcloud-mode,SolrCloud>> mode.
+For more information on starting Solr in SolrCloud mode, see the section <<tutorial-solrcloud.adoc#,Getting Started with SolrCloud>>.
 
 === Check if Solr is Running
 
@@ -164,18 +188,21 @@ bin/solr status
 
 This will search for running Solr instances on your computer and then gather basic information about them, such as the version and memory usage.
 
-That's it! Solr is running. If you need convincing, use a Web browser to see the Admin Console.
+That's it! Solr is running.
+If you need convincing, use a Web browser to see the Admin Console.
 
 `\http://localhost:8983/solr/`
 
 .The Solr Admin interface.
 image::images/installing-solr/SolrAdminDashboard.png[Solr's Admin UI,pdfwidth=75%]
 
-If Solr is not running, your browser will complain that it cannot connect to the server. Check your port number and try again.
+If Solr is not running, your browser will complain that it cannot connect to the server.
+Check your port number and try again.
 
 === Create a Core
 
-If you did not start Solr with an example configuration, you would need to create a core in order to be able to index and search. You can do so by running:
+If you did not start Solr with an example configuration, you would need to create a core in order to be able to index and search.
+You can do so by running:
 
 [source,bash]
 ----
diff --git a/solr/solr-ref-guide/src/jmx-with-solr.adoc b/solr/solr-ref-guide/src/jmx-with-solr.adoc
index 252659d..695948a 100644
--- a/solr/solr-ref-guide/src/jmx-with-solr.adoc
+++ b/solr/solr-ref-guide/src/jmx-with-solr.adoc
@@ -28,20 +28,24 @@ If you are unfamiliar with JMX, you may  find the following overview useful: htt
 
 JMX support is configured by defining a metrics reporter, as described in the section <<metrics-reporting.adoc#jmx-reporter,JMX Reporter>>.
 
-If you have an existing MBean server running in Solr's JVM, or if you start Solr with the system property `-Dcom.sun.management.jmxremote`, Solr will automatically identify its location on startup even if you have not defined a reporter explicitly in `solr.xml`. You can also define the location of the MBean server with parameters defined in the reporter definition.
+If you have an existing MBean server running in Solr's JVM, or if you start Solr with the system property `-Dcom.sun.management.jmxremote`, Solr will automatically identify its location on startup even if you have not defined a reporter explicitly in `solr.xml`.
+You can also define the location of the MBean server with parameters defined in the reporter definition.
 
 == Configuring MBean Servers
 
-Versions of Solr prior to 7.0 defined JMX support in `solrconfig.xml`. This has been changed to the metrics reporter configuration defined above.
+Versions of Solr prior to 7.0 defined JMX support in `solrconfig.xml`.
+This has been changed to the metrics reporter configuration defined above.
 Parameters for the reporter configuration allow defining the location or address of an existing MBean server.
 
-An MBean server can be started at the time of Solr's startup by passing the system parameter `-Dcom.sun.management.jmxremote`. See Oracle's documentation for additional settings available to start and control an MBean server at http://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html.
+An MBean server can be started at the time of Solr's startup by passing the system parameter `-Dcom.sun.management.jmxremote`.
+See Oracle's documentation for additional settings available to start and control an MBean server at http://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html.
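+
+For example, one way to pass this property at startup might be via the control script's option for additional JVM arguments (the `-a` usage below is only a sketch, not the only way to set it):
+
+[source,bash]
+----
+# Illustrative only: pass the JMX system property as an extra JVM argument
+bin/solr start -a "-Dcom.sun.management.jmxremote"
+----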
 
 === Configuring a Remote Connection to Solr JMX
 
 If you need to attach a JMX-enabled Java profiling tool, such as JConsole or VisualVM, to a remote Solr server, then you need to enable remote JMX access when starting the Solr server.
 Simply change the `ENABLE_REMOTE_JMX_OPTS` property in the `solr.in.sh` or `solr.in.cmd` (for Windows) file to `true`.
-You’ll also need to choose a port for the JMX RMI connector to bind to, such as 18983. For example, if your Solr include script sets:
+You’ll also need to choose a port for the JMX RMI connector to bind to, such as 18983.
+For example, if your Solr include script sets:
 
 [source,bash]
 ----
@@ -49,7 +53,8 @@ ENABLE_REMOTE_JMX_OPTS=true
 RMI_PORT=18983
 ----
 
-The JMX RMI connector will allow Java profiling tools to attach to port 18983. When enabled, the following properties are passed to the JVM when starting Solr:
+The JMX RMI connector will allow Java profiling tools to attach to port 18983.
+When enabled, the following properties are passed to the JVM when starting Solr:
 
 [source,plain]
 ----
diff --git a/solr/solr-ref-guide/src/join-query-parser.adoc b/solr/solr-ref-guide/src/join-query-parser.adoc
index b1536ca..83d94c2 100644
--- a/solr/solr-ref-guide/src/join-query-parser.adoc
+++ b/solr/solr-ref-guide/src/join-query-parser.adoc
@@ -97,7 +97,8 @@ This method must be used if score information is required, and should also be co
 .dvWithScore and single value numerics
 [WARNING]
 ====
-The `dvWithScore` method doesn't support single value numeric fields. Users migrating from versions prior to 7.0 are encouraged to change field types to string and rebuild indexes during migration.
+The `dvWithScore` method doesn't support single value numeric fields.
+Users migrating from versions prior to 7.0 are encouraged to change field types to string and rebuild indexes during migration.
 ====
 +
 `topLevelDV` can only be used when `to` and `from` fields have docValues data, and does not currently support numeric fields.
@@ -109,9 +110,11 @@ Consider this method when the "from" query matches a large number of documents a
 
 == Joining Across Single Shard Collections
 
-You can also specify a `fromIndex` parameter to join with a field from another core or a single shard collection. If running in SolrCloud mode, then the collection specified in the `fromIndex` parameter must have a single shard and a replica on all Solr nodes where the collection you're joining to has a replica.
+You can also specify a `fromIndex` parameter to join with a field from another core or a single shard collection.
+If running in SolrCloud mode, then the collection specified in the `fromIndex` parameter must have a single shard and a replica on all Solr nodes where the collection you're joining to has a replica.
 
-Let's consider an example where you want to use a Solr join query to filter movies by directors that have won an Oscar. Specifically, imagine we have two collections with the following fields:
+Let's consider an example where you want to use a Solr join query to filter movies by directors that have won an Oscar.
+Specifically, imagine we have two collections with the following fields:
 
 *movies*: id, title, director_id, ...
 
@@ -124,9 +127,12 @@ To filter movies by directors that have won an Oscar using a Solr join on the *m
 fq={!join from=id fromIndex=movie_directors to=director_id}has_oscar:true
 ----
 
-Notice that the query criteria of the filter (`has_oscar:true`) is based on a field in the collection specified using `fromIndex`. Keep in mind that you cannot return fields from the `fromIndex` collection using join queries, you can only use the fields for filtering results in the "to" collection (movies).
+Notice that the query criteria of the filter (`has_oscar:true`) is based on a field in the collection specified using `fromIndex`.
+Keep in mind that you cannot return fields from the `fromIndex` collection using join queries; you can only use the fields for filtering results in the "to" collection (movies).
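+
+As a sketch of a complete request (the host, port, and collection name `movies` below are assumptions, not part of a shipped example):
+
+[source,bash]
+----
+# Illustrative only: send the join filter along with a match-all query
+curl http://localhost:8983/solr/movies/select \
+  -d 'q=*:*' \
+  --data-urlencode 'fq={!join from=id fromIndex=movie_directors to=director_id}has_oscar:true'
+----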
 
-Next, let's understand how these collections need to be deployed in your cluster. Imagine the *movies* collection is deployed to a four node SolrCloud cluster and has two shards with a replication factor of two. Specifically, the *movies* collection has replicas on the following four nodes:
+Next, let's understand how these collections need to be deployed in your cluster.
+Imagine the *movies* collection is deployed to a four node SolrCloud cluster and has two shards with a replication factor of two.
+Specifically, the *movies* collection has replicas on the following four nodes:
 
 node 1: movies_shard1_replica1
 
@@ -136,7 +142,8 @@ node 3: movies_shard2_replica1
 
 node 4: movies_shard2_replica2
 
-To use the *movie_directors* collection in Solr join queries with the *movies* collection, it needs to have a replica on each of the four nodes. In other words, *movie_directors* must have one shard and replication factor of four:
+To use the *movie_directors* collection in Solr join queries with the *movies* collection, it needs to have a replica on each of the four nodes.
+In other words, *movie_directors* must have one shard and replication factor of four:
 
 node 1: movie_directors_shard1_replica1
 
@@ -146,7 +153,10 @@ node 3: movie_directors_shard1_replica3
 
 node 4: movie_directors_shard1_replica4
 
-At query time, the `JoinQParser` will access the local replica of the *movie_directors* collection to perform the join. If a local replica is not available or active, then the query will fail. At this point, it should be clear that since you're limited to a single shard and the data must be replicated across all nodes where it is needed, this approach works better with smaller data sets where there is a one-to-many relationship between the from collection and the to collection. Moreover, [...]
+At query time, the `JoinQParser` will access the local replica of the *movie_directors* collection to perform the join.
+If a local replica is not available or active, then the query will fail.
+At this point, it should be clear that since you're limited to a single shard and the data must be replicated across all nodes where it is needed, this approach works better with smaller data sets where there is a one-to-many relationship between the from collection and the to collection.
+Moreover, if you add a replica to the to collection, then you also need to add a replica for the from collection.
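+
+For example, a layout like this could be created with the Collections API; the command below is only a sketch and the configset name is an assumption:
+
+[source,bash]
+----
+# Illustrative only: one shard, four replicas (one per node in a four-node cluster)
+curl "http://localhost:8983/solr/admin/collections?action=CREATE&name=movie_directors&numShards=1&replicationFactor=4&collection.configName=_default"
+----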
 
 For more information, Erick Erickson has written a blog post about join performance titled https://lucidworks.com/2012/06/20/solr-and-joins/[Solr and Joins].
 
@@ -180,10 +190,13 @@ The remote Solr collection does not have any specific sharding requirements.
 The cross collection join has some configuration options that can be specified in  `solrconfig.xml`.
 
 `routerField`::
-If the documents are routed to shards using the CompositeID router by the join field, then that field name should be specified in the configuration here.  This will allow the parser to optimize the resulting HashRange query.
+If the documents are routed to shards using the CompositeID router by the join field, then that field name should be specified in the configuration here.
+This will allow the parser to optimize the resulting HashRange query.
 
 `solrUrl`::
-If specified, this array of strings specifies the white listed Solr URLs that you can pass to the solrUrl query parameter. Without this configuration the solrUrl parameter cannot be used. This restriction is necessary to prevent an attacker from using Solr to explore the network.
+If specified, this array of strings specifies the whitelisted Solr URLs that you can pass to the `solrUrl` query parameter.
+Without this configuration, the `solrUrl` parameter cannot be used.
+This restriction is necessary to prevent an attacker from using Solr to explore the network.
 
 [source,xml]
 ----
@@ -201,10 +214,15 @@ If specified, this array of strings specifies the white listed Solr URLs that yo
 The name of the external Solr collection to be queried to retrieve the set of join key values (required).
 
 `zkHost`::
-The connection string to be used to connect to ZooKeeper. `zkHost` and `solrUrl` are both optional parameters, and at most one of them should be specified. If neither `zkHost` nor `solrUrl` are specified, the local ZooKeeper cluster will be used. (optional).
+The connection string to be used to connect to ZooKeeper (optional).
+`zkHost` and `solrUrl` are both optional parameters, and at most one of them should be specified.
+If neither `zkHost` nor `solrUrl` is specified, the local ZooKeeper cluster will be used.
 
 `solrUrl`::
-The URL of the external Solr node to be queried. Must be a character for character exact match of a whitelisted url. (optional, disabled by default for security).
+The URL of the external Solr node to be queried (optional, disabled by default for security).
+It must be a character-for-character exact match of a whitelisted URL.
 
 `from`::
 The join key field name in the external collection (required).
@@ -213,7 +231,8 @@ The join key field name in the external collection (required).
 The join key field name in the local collection.
 
 `v`::
-The query substituted in as a local param.  This is the query string that will match documents in the remote collection.
+The query substituted in as a local param.
+This is the query string that will match documents in the remote collection.
 
 `routed`::
 If `true`, the cross collection join query will use each shard's hash range to determine the set of join keys to retrieve for that shard.
diff --git a/solr/solr-ref-guide/src/json-facet-api.adoc b/solr/solr-ref-guide/src/json-facet-api.adoc
index 1154502..bf5b63a 100644
--- a/solr/solr-ref-guide/src/json-facet-api.adoc
+++ b/solr/solr-ref-guide/src/json-facet-api.adoc
@@ -21,7 +21,8 @@
 
 == Facet & Analytics Module
 
-The JSON Faceting module exposes similar functionality to Solr's traditional faceting module but with a stronger emphasis on usability.  It has several benefits over traditional faceting:
+The JSON Faceting module exposes similar functionality to Solr's traditional faceting module but with a stronger emphasis on usability.
+It has several benefits over traditional faceting:
 
 * easier programmatic construction of complex or nested facets
 * the nesting and structure offered by JSON makes facets easier to read and understand than the flat namespace of the traditional faceting API.
@@ -73,7 +74,8 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-json-simple-terms
 ====
 --
 
-The response below shows us that 32 documents match the default root domain. and 12 documents have `cat:electronics`, 4 documents have `cat:currency`, etc.
+The response below shows us that 32 documents match the default root domain.
+Of these, 12 documents have `cat:electronics`, 4 have `cat:currency`, etc.
 
 [source,java]
 ----
@@ -97,7 +99,9 @@ The response below shows us that 32 documents match the default root domain. and
 
 === Stat Facet Example
 
-Stat (also called `aggregation` or `analytic`) facets are useful for displaying information derived from query results, in addition to those results themselves.  For example, stat facets can be used to provide context to users on an e-commerce site looking for memory.  The example below computes the average price (and other statistics) and would allow a user to gauge whether the memory stick in their cart is a good price.
+Stat (also called `aggregation` or `analytic`) facets are useful for displaying information derived from query results, in addition to those results themselves.
+For example, stat facets can be used to provide context to users on an e-commerce site looking for memory.
+The example below computes the average price (and other statistics) and would allow a user to gauge whether the memory stick in their cart is a good price.
 
 [.dynamic-tabs]
 --
@@ -372,7 +376,8 @@ The output from the range facet above would look a bit like:
 
 ==== Range Facet Parameters
 
-Range facet parameter names and semantics largely mirror facet.range query-parameter style faceting. For example "start" here corresponds to "facet.range.start" in a facet.range command.
+Range facet parameter names and semantics largely mirror facet.range query-parameter style faceting.
+For example "start" here corresponds to "facet.range.start" in a facet.range command.
 
 [width="100%",cols="10%,90%",options="header",]
 |===
@@ -411,7 +416,8 @@ Refer <<Arbitrary Range>>
 
 ==== Arbitrary Range
 
-An arbitrary range consists of from and to values over which range bucket is computed. This range can be specified in two syntax.
+An arbitrary range consists of from and to values over which the range bucket is computed.
+This range can be specified in either of two syntaxes.
 
 [width="100%",cols="10%,90%",options="header",]
 |===
@@ -442,7 +448,8 @@ For example, For range `(5,10]` 5 is excluded and 10 is included
 
 ===== include with ranges
 
-`include` parameter is ignored when `ranges` is specified but there are ways to achieve same behavior with `ranges`. `lower`, `upper`, `outer`, `edge` all can be achieved using combination of `inclusive_to` and `inclusive_from`.
+The `include` parameter is ignored when `ranges` is specified, but the same behavior can be achieved with `ranges`.
+`lower`, `upper`, `outer`, and `edge` can all be achieved using a combination of `inclusive_to` and `inclusive_from`.
 
 Range facet with `ranges`
 
@@ -491,7 +498,9 @@ The output from the range facet above would look a bit like:
 }
 ----
 
-NOTE: When `range` is specified, its value in the request is used as key in the response. In the other case, key is generated using `from`, `to`, `inclusive_to` and `inclusive_from`. Currently, custom `key` is not supported.
+NOTE: When `range` is specified, its value in the request is used as the key in the response.
+Otherwise, the key is generated using `from`, `to`, `inclusive_to`, and `inclusive_from`.
+Currently, a custom `key` is not supported.
 
 === Heatmap Facet
 
@@ -559,7 +568,8 @@ And the facet response will look like:
 
 === Stat Facet Functions
 
-Unlike all the facets discussed so far, Aggregation functions (also called *facet functions*, *analytic functions*, or *metrics*) do not partition data into buckets.  Instead, they calculate something over all the documents in the domain.
+Unlike all the facets discussed so far, aggregation functions (also called *facet functions*, *analytic functions*, or *metrics*) do not partition data into buckets.
+Instead, they calculate something over all the documents in the domain.
 
 [width="100%",cols="10%,30%,60%",options="header",]
 |===
@@ -578,7 +588,7 @@ Unlike all the facets discussed so far, Aggregation functions (also called *face
 |sumsq |`sumsq(rent)` |sum of squares of field or function
 |variance |`variance(rent)` |variance of numeric field or function
 |stddev |`stddev(rent)` |standard deviation of field or function
-|relatedness |`relatedness('popularity:[100 TO *]','inStock:true')`|A function for computing a relatedness score of the documents in the domain to a Foreground set, relative to a Background set (both defined as queries).  This is primarily for use when building <<json-facet-api.adoc#relatedness-and-semantic-knowledge-graphs,Semantic Knowledge Graphs>>.
+|relatedness |`relatedness('popularity:[100 TO *]','inStock:true')`|A function for computing a relatedness score of the documents in the domain to a Foreground set, relative to a Background set (both defined as queries). This is primarily for use when building <<json-facet-api.adoc#relatedness-and-semantic-knowledge-graphs,Semantic Knowledge Graphs>>.
 |===
 
 Numeric aggregation functions such as `avg` can be on any numeric field, or on a <<function-queries.adoc#,nested function>> of multiple numeric fields such as `avg(div(popularity,price))`.
@@ -617,7 +627,8 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-json-metrics-face
 ====
 --
 
-An expanded form allows for <<local-params.adoc#,local params>> to be specified.  These may be used explicitly by some specialized aggregations such as `<<json-facet-api.adoc#relatedness-options,relatedness()>>`, but can also be used as parameter references to make aggregation expressions more readable, with out needing to use (global) request parameters:
+An expanded form allows for <<local-params.adoc#,local params>> to be specified.
+These may be used explicitly by some specialized aggregations such as `<<json-facet-api.adoc#relatedness-options,relatedness()>>`, but can also be used as parameter references to make aggregation expressions more readable, without needing to use (global) request parameters:
 
 [.dynamic-tabs]
 --
@@ -659,9 +670,11 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-json-metrics-face
 
 == Nested Facets
 
-Nested facets, or **sub-facets**, allow one to nest facet commands under any facet command that partitions the domain into buckets (i.e., `terms`, `range`, `query`).  These sub-facets are then evaluated against the domains defined by the set of all documents in each bucket of their parent.
+Nested facets, or **sub-facets**, allow one to nest facet commands under any facet command that partitions the domain into buckets (i.e., `terms`, `range`, `query`).
+These sub-facets are then evaluated against the domains defined by the set of all documents in each bucket of their parent.
 
-The syntax is identical to top-level facets - just add a `facet` command to the facet command block of the parent facet.  Technically, every facet command is actually a sub-facet since we start off with a single facet bucket with a domain defined by the main query and filters.
+The syntax is identical to top-level facets - just add a `facet` command to the facet command block of the parent facet.
+Technically, every facet command is actually a sub-facet since we start off with a single facet bucket with a domain defined by the main query and filters.
 
 === Nested Facet Example
 
@@ -699,7 +712,9 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-json-simple-terms
 ====
 --
 
-The response for the facet above will show the top category and the number of documents that falls into each category bucket. Nested facets can be used to gather additional information about each bucket of documents.  For example, using the nested facet below, we can find the top categories as well as who the leading manufacturer is in each category:
+The response for the facet above will show the top category and the number of documents that fall into each category bucket.
+Nested facets can be used to gather additional information about each bucket of documents.
+For example, using the nested facet below, we can find the top categories as well as who the leading manufacturer is in each category:
 
 [.dynamic-tabs]
 --
@@ -766,7 +781,8 @@ And the response will look something like:
 
 === Sorting Facets By Nested Functions
 
-The default sort for a field or terms facet is by bucket count descending. We can optionally `sort` ascending or descending by any facet function that appears in each bucket.
+The default sort for a field or terms facet is by bucket count descending.
+We can optionally `sort` ascending or descending by any facet function that appears in each bucket.
 
 
 [.dynamic-tabs]
@@ -805,7 +821,9 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-json-nested-cat-f
 ====
 --
 
-In some situations the desired `sort` may be an aggregation function that is very costly to compute for every bucket.  A `prelim_sort` option can be used to specify an approximation of the `sort`, for initially ranking the buckets to determine the top candidates (based on the `limit` and `overrequest`).  Only after the top candidate buckets have been refined, will the actual `sort` be used.
+In some situations the desired `sort` may be an aggregation function that is very costly to compute for every bucket.
+A `prelim_sort` option can be used to specify an approximation of the `sort`, for initially ranking the buckets to determine the top candidates (based on the `limit` and `overrequest`).
+Only after the top candidate buckets have been refined, will the actual `sort` be used.
 
 [source,java]
 ----
@@ -834,11 +852,15 @@ As discussed above, facets compute buckets or statistics based on their "domain"
 * By default, top-level facets use the set of all documents matching the main query as their domain.
 * Nested "sub-facets" are computed for every bucket of their parent facet, using a domain containing all documents in that bucket.
 
-In addition to this default behavior, domains can be also be widened, narrowed, or changed entirely.  The JSON Faceting API supports modifying domains through its `domain` property.  This is discussed in more detail <<json-faceting-domain-changes.adoc#,here>>
+In addition to this default behavior, domains can also be widened, narrowed, or changed entirely.
+The JSON Faceting API supports modifying domains through its `domain` property.
+This is discussed in more detail <<json-faceting-domain-changes.adoc#,here>>.
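+
+As a small illustration (a sketch using the `cat` and `inStock` fields from the techproducts example), a facet's domain can be narrowed with a `filter` before buckets are computed:
+
+[source,bash]
+----
+# Illustrative sketch: count categories only among the in-stock subset of the query's domain
+curl http://localhost:8983/solr/techproducts/query -d '
+{
+  "query": "*:*",
+  "facet": {
+    "in_stock_cats": {
+      "type": "terms",
+      "field": "cat",
+      "domain": { "filter": "inStock:true" }
+    }
+  }
+}'
+----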
 
 == Special Stat Facet Functions
 
-Most stat facet functions (`avg`, `sumsq`, etc.) allow users to perform math computations on groups of documents.  A few functions are more involved though, and deserve an explanation of their own.  These are described in more detail in the sections below.
+Most stat facet functions (`avg`, `sumsq`, etc.) allow users to perform math computations on groups of documents.
+A few functions are more involved though, and deserve an explanation of their own.
+These are described in more detail in the sections below.
 
 === uniqueBlock() and Block Join Counts
 
@@ -903,25 +925,34 @@ The `relatedness(...)` stat function allows for sets of documents to be scored r
 
 [quote, Grainger et al., 'https://arxiv.org/abs/1609.00464[The Semantic Knowledge Graph]']
 ____
-At its heart, the Semantic Knowledge Graph leverages an inverted index, along with a complementary uninverted index, to represent nodes (terms) and edges (the documents within intersecting postings lists for multiple terms/nodes). This provides a layer of indirection between each pair of nodes and their corresponding edge, enabling edges to materialize dynamically from underlying corpus statistics. As a result, any combination of nodes can have edges to any other nodes materialize and be [...]
+At its heart, the Semantic Knowledge Graph leverages an inverted index, along with a complementary uninverted index, to represent nodes (terms) and edges (the documents within intersecting postings lists for multiple terms/nodes).
+This provides a layer of indirection between each pair of nodes and their corresponding edge, enabling edges to materialize dynamically from underlying corpus statistics.
+As a result, any combination of nodes can have edges to any other nodes materialize and be scored to reveal latent relationships between the nodes.
 ____
 
 The `relatedness(...)` function is used to "score" these relationships, relative to "Foreground" and "Background" sets of documents, specified in the function params as queries.
 
 Unlike most aggregation functions, the `relatedness(...)` function is aware of whether and how it's used in <<nested-facets,Nested Facets>>.  It evaluates the query defining the current bucket _independently_ from its parent/ancestor buckets, and intersects those documents with a "Foreground Set" defined by the foreground query _combined with the ancestor buckets_.  The result is then compared to a similar intersection done against the "Background Set" (defined exclusively by background  [...]
 
-NOTE: The semantics of `relatedness(...)` in an `allBuckets` context is currently undefined. Accordingly, although the `relatedness(...)` stat may be specified for a facet request that also specifies `allBuckets:true`, the `allBuckets` bucket itself will not include a relatedness calculation.
+NOTE: The semantics of `relatedness(...)` in an `allBuckets` context is currently undefined.
+Accordingly, although the `relatedness(...)` stat may be specified for a facet request that also specifies `allBuckets:true`, the `allBuckets` bucket itself will not include a relatedness calculation.
 
-NOTE: While it's very common to define the Background Set as `\*:*`, or some other super-set of the Foreground Query, it is not strictly required.  The `relatedness(...)` function can be used to compare the statistical relatedness of sets of documents to orthogonal foreground/background queries.
+NOTE: While it's very common to define the Background Set as `\*:*`, or some other super-set of the Foreground Query, it is not strictly required.
+The `relatedness(...)` function can be used to compare the statistical relatedness of sets of documents to orthogonal foreground/background queries.
 
 [[relatedness-options]]
 ==== relatedness() Options
 
 When using the extended `type:func` syntax for specifying a `relatedness()` aggregation, an optional `min_popularity` (float) option can be used to specify a lower bound on the `foreground_popularity` and `background_popularity` values, that must be met in order for the `relatedness` score to be valid -- If this `min_popularity` is not met, then the `relatedness` score will be `-Infinity`.
 
-The default implementation for calculating `relatedness()` domain correlation depends on the type of facet being calculated. Generic domain correlation is calculated per-term, by selectively retrieving a DocSet for each bucket-associated query (consulting the `filterCache`) and calculating DocSet intersections with "foreground" and "background" sets. For term facets (especially over high-cardinality fields) this approach can lead to `filterCache` thrashing; accordingly, `relatedness()` o [...]
+The default implementation for calculating `relatedness()` domain correlation depends on the type of facet being calculated.
+Generic domain correlation is calculated per-term, by selectively retrieving a DocSet for each bucket-associated query (consulting the `filterCache`) and calculating DocSet intersections with "foreground" and "background" sets.
+For term facets (especially over high-cardinality fields) this approach can lead to `filterCache` thrashing; accordingly, `relatedness()` over term facets defaults where possible to an approach that collects facet counts directly over multiple domains in a single sweep (never touching the `filterCache`).
+It is possible to explicitly control this "single sweep" collection by setting the extended `type:func` syntax `sweep_collection` option to `true` (the default) or `false` (to disable sweep collection).
 
-NOTE: Disabling sweep collection for `relatedness()` stats over low-cardinality fields may yield a performance benefit, provided the `filterCache` is sufficiently large to accommodate an entry for each value in the associated field without inducing thrashing for anticipated use patterns. A reasonable heuristic is that fields of cardinality less than 1,000 _may_ benefit from disabling sweep. This heuristic is _not_ used to determine default behavior, particularly because non-sweep collect [...]
+NOTE: Disabling sweep collection for `relatedness()` stats over low-cardinality fields may yield a performance benefit, provided the `filterCache` is sufficiently large to accommodate an entry for each value in the associated field without inducing thrashing for anticipated use patterns.
+A reasonable heuristic is that fields of cardinality less than 1,000 _may_ benefit from disabling sweep.
+This heuristic is _not_ used to determine default behavior, particularly because non-sweep collection can so easily induce `filterCache` thrashing, with system-wide detrimental effects.
 
 [source,json]
 ----
@@ -935,7 +966,8 @@ This can be particularly useful when using a descending sorting on `relatedness(
 
 [TIP]
 ====
-When sorting on `relatedness(...)` requests can be processed much more quickly by adding a `prelim_sort: "count desc"` option.  Increasing the `overrequest` can help improve the accuracy of the top buckets.
+When sorting on `relatedness(...)`, requests can be processed much more quickly by adding a `prelim_sort: "count desc"` option.
+Increasing the `overrequest` can help improve the accuracy of the top buckets.
 ====
 
 ==== Semantic Knowledge Graph Example
diff --git a/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc b/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc
index 8c9fd27..d50bda8 100644
--- a/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc
+++ b/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc
@@ -18,7 +18,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Facet computation operates on a "domain" of documents.  By default, this domain consists of the documents matched by the main query.  For sub-facets, the domain consists of all documents placed in their bucket by the parent facet.
+Facet computation operates on a "domain" of documents.
+By default, this domain consists of the documents matched by the main query.
+For sub-facets, the domain consists of all documents placed in their bucket by the parent facet.
 
 Users can also override the "domain" of a facet that partitions data, using an explicit `domain` attribute whose value is a JSON object that can support various options for restricting, expanding, or completely changing the original domain before the buckets are computed for the associated facet.
 
@@ -32,7 +34,8 @@ A `\*:*` query facet with a `domain` change can be used to group multiple sub-fa
 
 == Adding Domain Filters
 
-The simplest example of a domain change is to specify an additional filter which will be applied to the existing domain. This can be done via the `filter` keyword in the `domain` block of the facet.
+The simplest example of a domain change is to specify an additional filter which will be applied to the existing domain.
+This can be done via the `filter` keyword in the `domain` block of the facet.
 
 [.dynamic-tabs]
 --
@@ -69,10 +72,12 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-json-facet-filter
 ====
 --
 
-The value of `filter` can be a single query to treat as a filter, or a JSON list of filter queries.  Each query can be:
+The value of `filter` can be a single query to treat as a filter, or a JSON list of filter queries.
+Each query can be:
 
 * a string containing a query in Solr query syntax.
-* a reference to a request parameter containing Solr query syntax, of the form: `{param: <request_param_name>}`. It's possible to refer to one or multiple queries in DSL syntax defined under <<json-query-dsl.adoc#additional-queries,queries>> key in JSON Request API.
+* a reference to a request parameter containing Solr query syntax, of the form: `{param: <request_param_name>}`.
+It's possible to refer to one or multiple queries in DSL syntax defined under the <<json-query-dsl.adoc#additional-queries,queries>> key in the JSON Request API.
 The referred parameter might have 0 (absent) or many values.
 ** When no values are specified, no filter is applied and no error is thrown.
 ** When many values are specified, each value is parsed and used as filters in conjunction.
@@ -120,7 +125,8 @@ When a `filter` option is combined with other `domain` changing options, the fil
 
 Domains can also be expanded by using the `excludeTags` keyword to discard or ignore particular tagged query filters.
 
-This is used in the example below to show the top two manufacturers matching a search. The search results match the filter `manu_id_s:apple`, but the computed facet discards this filter and operates a domain widened by discarding the `manu_id_s`  filter.
+This is used in the example below to show the top two manufacturers matching a search.
+The search results match the filter `manu_id_s:apple`, but the computed facet discards this filter and operates on a domain widened by removing the `manu_id_s` filter.
 
 [.dynamic-tabs]
 --
@@ -165,7 +171,9 @@ See also the section on <<faceting.adoc#tagging-and-excluding-filters,multi-sele
 
 == Arbitrary Domain Query
 
-A `query` domain can be specified when you wish to compute a facet against an arbitrary set of documents, regardless of the original domain.  The most common use case would be to compute a top level facet against a specific subset of the collection, regardless of the main query.  But it can also be useful on nested facets when building <<json-facet-api.adoc#relatedness-and-semantic-knowledge-graphs,Semantic Knowledge Graphs>>.
+A `query` domain can be specified when you wish to compute a facet against an arbitrary set of documents, regardless of the original domain.
+The most common use case would be to compute a top level facet against a specific subset of the collection, regardless of the main query.
+But it can also be useful on nested facets when building <<json-facet-api.adoc#relatedness-and-semantic-knowledge-graphs,Semantic Knowledge Graphs>>.
 
 Example:
 
@@ -202,7 +210,8 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-json-facet-query-
 ====
 --
 
-The value of `query` can be a single query, or a JSON list of queries.  Each query can be:
+The value of `query` can be a single query, or a JSON list of queries.
+Each query can be:
 
 * a string containing a query in Solr query syntax.
 * a reference to a request parameter containing Solr query syntax, of the form: `{param: <request_param_name>}`.
@@ -216,7 +225,10 @@ NOTE: While a `query` domain can be combined with an additional domain `filter`,
 
 When a collection contains <<indexing-nested-documents.adoc#, Nested Documents>>, the `blockChildren` or `blockParent` domain options can be used to transform an existing domain containing one type of document, into a domain containing the documents with the specified relationship (child or parent of) to the documents from the original domain.
 
-Both of these options work similarly to the corresponding <<block-join-query-parser.adoc#,Block Join Query Parsers>> by taking in a single String query that exclusively matches all parent documents in the collection.  If `blockParent` is used, then the resulting domain will contain all parent documents of the children from the original domain.  If `blockChildren` is used, then the resulting domain will contain all child documents of the parents from the original domain. Quite often facet [...]
+Both of these options work similarly to the corresponding <<block-join-query-parser.adoc#,Block Join Query Parsers>> by taking in a single String query that exclusively matches all parent documents in the collection.
+If `blockParent` is used, then the resulting domain will contain all parent documents of the children from the original domain.
+If `blockChildren` is used, then the resulting domain will contain all child documents of the parents from the original domain.
+Quite often facets over child documents need to be counted in parent documents; this can be done with `uniqueBlock(\_root_)` as described in <<json-facet-api#uniqueblock-and-block-join-counts, Block Join Facet Counts>>.
 
 [source,json,subs="verbatim,callouts"]
 ----
@@ -235,7 +247,8 @@ Both of these options work similarly to the corresponding <<block-join-query-par
 ----
 <1> This example assumes we have parent documents corresponding to Products, with child documents corresponding to individual SKUs with unique colors, and that our original query was against SKU documents.
 <2> The `colors` facet will be computed against all of the original SKU documents matching our search.
-<3> For each bucket in the `colors` facet, the set of all matching SKU documents will be transformed into the set of corresponding parent Product documents.  The resulting `brands` sub-facet will count how many Product documents (that have SKUs with the associated color) exist for each Brand.
+<3> For each bucket in the `colors` facet, the set of all matching SKU documents will be transformed into the set of corresponding parent Product documents.
+The resulting `brands` sub-facet will count how many Product documents (that have SKUs with the associated color) exist for each Brand.
 
 == Join Query Domain Changes
 
@@ -268,7 +281,9 @@ Example:
 
 ----
 
-`join` domain changes support an optional `method` parameter, which allows users to specify which join implementation they would like to use in this domain transform.  Solr offers several join implementations, each with different performance characteristics.  For more information on these implementations and their tradeoffs, see the `method` parameter documentation <<join-query-parser.adoc#parameters,here>>.  Join domain changes support all `method` values except `crossCollection`.
+`join` domain changes support an optional `method` parameter, which allows users to specify which join implementation they would like to use in this domain transform.
+Solr offers several join implementations, each with different performance characteristics.
+For more information on these implementations and their tradeoffs, see the `method` parameter documentation <<join-query-parser.adoc#parameters,here>>.
+Join domain changes support all `method` values except `crossCollection`.
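+
+For example, a sketch of a join domain that pins the implementation via `method` (the collection and field names below are assumptions):
+
+[source,bash]
+----
+# Illustrative sketch: facet over a domain reached by joining from manu_id_s to id
+curl http://localhost:8983/solr/techproducts/query -d '
+{
+  "query": "*:*",
+  "facet": {
+    "categories": {
+      "type": "terms",
+      "field": "cat",
+      "domain": { "join": { "from": "manu_id_s", "to": "id", "method": "index" } }
+    }
+  }
+}'
+----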
 
 == Graph Traversal Domain Changes
 
diff --git a/solr/solr-ref-guide/src/json-query-dsl.adoc b/solr/solr-ref-guide/src/json-query-dsl.adoc
index 81b90d8..33d0fdf 100644
--- a/solr/solr-ref-guide/src/json-query-dsl.adoc
+++ b/solr/solr-ref-guide/src/json-query-dsl.adoc
@@ -23,18 +23,22 @@ Queries and filters provided in JSON requests can be specified using a rich, pow
 == Query DSL Structure
 The JSON Request API accepts query values in three different formats:
 
-* A valid <<standard-query-parser.adoc#,query string>> that uses the default `deftype` (`lucene`, in most cases). e.g., `title:solr`.
+* A valid <<standard-query-parser.adoc#,query string>> that uses the default `deftype` (`lucene`, in most cases), e.g., `title:solr`.
 
-* A valid <<local-params.adoc#,local params query string>> that specifies its `deftype` explicitly. e.g., `{!dismax qf=title}solr`.
+* A valid <<local-params.adoc#,local params query string>> that specifies its `deftype` explicitly, e.g., `{!dismax qf=title}solr`.
 
-* A valid JSON object with the name of the query parser and any relevant parameters. e.g., `{ "lucene": {"df":"title", "query":"solr"}}`.
-** The top level "query" JSON block generally only has a single property representing the name of the query parser to use.  The value for the query parser property is a child block containing any relevant parameters as JSON properties.  The whole structure is analogous to a "local-params" query string.  The query itself (often represented in local params using the name `v`) is specified with the key `query` instead.
+* A valid JSON object with the name of the query parser and any relevant parameters, e.g., `{ "lucene": {"df":"title", "query":"solr"}}`.
+** The top level "query" JSON block generally only has a single property representing the name of the query parser to use.
+The value for the query parser property is a child block containing any relevant parameters as JSON properties.
+The whole structure is analogous to a "local-params" query string.
+The query itself (often represented in local params using the name `v`) is specified with the key `query` instead.
 
 All of these syntaxes can be used to specify queries for either the JSON Request API's `query` or `filter` properties.
 
 === Query DSL Examples
 
-The examples below show how to use each of the syntaxes discussed above to represent a query.  Each snippet represents the same basic search: the term `iPod` in a field called `name`:
+The examples below show how to use each of the syntaxes discussed above to represent a query.
+Each snippet represents the same basic search: the term `iPod` in a field called `name`:
 
 . Using the standard query API, with a simple query string
 +
@@ -139,7 +143,9 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-ipod-query-dsl-3]
 --
 
 == Nested Queries
-Many of Solr's query parsers allow queries to be nested within one another.  When these are used, requests using the standard query API quickly become hard to write, read, and understand.  These sorts of queries are often much easier to work with in the JSON Request API.
+Many of Solr's query parsers allow queries to be nested within one another.
+When these are used, requests using the standard query API quickly become hard to write, read, and understand.
+These sorts of queries are often much easier to work with in the JSON Request API.
 
 === Nested Boost Query Example
 As an example, consider the three requests below, which wrap a simple query (the term `iPod` in the field `name`) within a boost query:
@@ -235,7 +241,8 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-ipod-query-booste
 === Nested Boolean Query Example
 Query nesting is commonly seen when combining multiple query clauses together using pseudo-boolean logic with the <<other-parsers.adoc#boolean-query-parser,BoolQParser>>.
 
-The example below shows how the `BoolQParser` can be used to create powerful nested queries.  In this example, a user searches for results with `iPod` in the field `name` which are _not_ in the bottom half of the `popularity` rankings.
+The example below shows how the `BoolQParser` can be used to create powerful nested queries.
+In this example, a user searches for results with `iPod` in the field `name` which are _not_ in the bottom half of the `popularity` rankings.
 
 [.dynamic-tabs]
 --
@@ -388,12 +395,19 @@ curl -X POST http://localhost:8983/solr/techproducts/query -d '
 }'
 ----
 
-Overall this example doesn't make much sense, but just demonstrates the syntax. This feature is useful in <<json-faceting-domain-changes.adoc#adding-domain-filters,filtering domain>> in JSON Facet API <<json-facet-api.adoc#changing-the-domain,domain changes>>. Note that these declarations add request parameters underneath, so using same names with other parameters might cause unexpected behavior.
+Overall this example doesn't make much sense, but it demonstrates the syntax.
+This feature is useful for <<json-faceting-domain-changes.adoc#adding-domain-filters,filtering domains>> in JSON Facet API <<json-facet-api.adoc#changing-the-domain,domain changes>>.
+Note that these declarations add request parameters underneath, so using the same names as other parameters might cause unexpected behavior.
 
 == Tagging in JSON Query DSL
-Query and filter clauses can also be individually "tagged".  Tags serve as handles for query clauses, allowing them to be referenced from elsewhere in the request.  This is most commonly used by the filter-exclusion functionality offered by both <<faceting.adoc#tagging-and-excluding-filters,traditional>> and <<json-faceting-domain-changes.adoc#filter-exclusions,JSON>> faceting.
+Query and filter clauses can also be individually "tagged".
+Tags serve as handles for query clauses, allowing them to be referenced from elsewhere in the request.
+This is most commonly used by the filter-exclusion functionality offered by both <<faceting.adoc#tagging-and-excluding-filters,traditional>> and <<json-faceting-domain-changes.adoc#filter-exclusions,JSON>> faceting.
 
-Queries and filters are tagged by wrapping them in a surrounding JSON object.  The name of the tag is specified as a JSON key, with the query string (or object) becoming the value associated with that key.  Tag name properties are prefixed with a hash, and may include multiple tags, separated by commas.  For example: `{"\#title,tag2,tag3":"title:solr"}`.  Note that unlike the rest of the JSON request API which uses lax JSON parsing rules, tags must be surrounded by double-quotes because  [...]
+Queries and filters are tagged by wrapping them in a surrounding JSON object.
+The name of the tag is specified as a JSON key, with the query string (or object) becoming the value associated with that key.
+Tag name properties are prefixed with a hash, and may include multiple tags, separated by commas.
+For example: `{"\#title,tag2,tag3":"title:solr"}`.
+Note that unlike the rest of the JSON request API which uses lax JSON parsing rules, tags must be surrounded by double-quotes because of the leading `#` character.
+The example below creates two tagged clauses: `titleTag` and `inStockTag`.
 
 [.dynamic-tabs]
 --
@@ -427,4 +441,5 @@ include::{example-source-dir}JsonRequestApiTest.java[tag=solrj-tagged-query]
 ====
 --
 
-Note that the tags created in the example above have no impact in how the search is executed.  Tags will not affect a query unless they are referenced by some other part of the request that uses them.
+Note that the tags created in the example above have no impact in how the search is executed.
+Tags will not affect a query unless they are referenced by some other part of the request that uses them.
diff --git a/solr/solr-ref-guide/src/jvm-settings.adoc b/solr/solr-ref-guide/src/jvm-settings.adoc
index f0a7e66..9b49a63 100644
--- a/solr/solr-ref-guide/src/jvm-settings.adoc
+++ b/solr/solr-ref-guide/src/jvm-settings.adoc
@@ -18,32 +18,52 @@
 
 Optimizing the JVM can be a key factor in getting the most from your Solr installation.
 
-Configuring your JVM is a complex topic and a full discussion is beyond the scope of this document. Luckily, most modern JVMs are quite good at making the best use of available resources with default settings. The following sections contain a few tips that may be helpful when the defaults are not optimal for your situation.
+Configuring your JVM is a complex topic and a full discussion is beyond the scope of this document.
+Luckily, most modern JVMs are quite good at making the best use of available resources with default settings.
+The following sections contain a few tips that may be helpful when the defaults are not optimal for your situation.
 
 For more general information about improving Solr performance, see https://cwiki.apache.org/confluence/display/solr/SolrPerformanceFactors[Solr Performance Factors] in the Solr Wiki.
 
 == Choosing Memory Heap Settings
 
-The most important JVM configuration settings control the heap allocated to the JVM: `-Xms`, which sets the initial size of the JVM's memory heap, and `-Xmx`, which sets the maximum size of the heap. Setting these two options to the same value is a common practice.
+The most important JVM configuration settings control the heap allocated to the JVM: `-Xms`, which sets the initial size of the JVM's memory heap, and `-Xmx`, which sets the maximum size of the heap.
+Setting these two options to the same value is a common practice.
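+
+For example (the 4g value below is purely illustrative, not a recommendation), the heap can be set in the include script or at startup:
+
+[source,bash]
+----
+# In solr.in.sh (or solr.in.cmd), set both -Xms and -Xmx explicitly:
+SOLR_JAVA_MEM="-Xms4g -Xmx4g"
+
+# Or at startup, where -m sets the min and max heap to the same value:
+bin/solr start -m 4g
+----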
 
-Heap size is critical and unfortunately there is no "one size fits all" solution, you must test with your data and your application. The best way to determine the correct size is to analyze the garbage collection (GC) logs located in your logs directory. There are various tools that help analyze these logs and, in particular, show the amount of memory used after GC has completed (http://www.tagtraum.com/gcviewer.html[GCViewer] and https://gceasy.io/[GCEasy] are two). Also you can attach  [...]
+Heap size is critical, and unfortunately there is no "one size fits all" solution; you must test with your data and your application.
+The best way to determine the correct size is to analyze the garbage collection (GC) logs located in your logs directory.
+There are various tools that help analyze these logs and, in particular, show the amount of memory used after GC has completed (http://www.tagtraum.com/gcviewer.html[GCViewer] and https://gceasy.io/[GCEasy] are two).
+You can also attach jconsole (distributed with most Java runtimes) to check memory consumption as Solr is running.
+This will show the absolute minimum amount of memory required; adding 25-50% "headroom" is a reasonable starting point.
 
 There are several points to keep in mind:
 
-* Running Solr with too little "headroom" allocated for the heap can cause excessive resources to be consumed by continual GC. Thus the 25-50% recommendation above.
-* Lucene/Solr makes extensive use of MMapDirectory, which uses RAM _not_ reserved for the JVM for most of the Lucene index. Therefore, as much memory as possible should be left for the operating system to use for this purpose.
-* The heap allocated should be as small as possible while maintaining good performance. 8-16Gb is quite common, and larger heaps are sometimes used. When heaps grow to larger sizes, it is imperative to test extensively before going to production.
+* Running Solr with too little "headroom" allocated for the heap can cause excessive resources to be consumed by continual GC.
+Thus the 25-50% recommendation above.
+* Lucene/Solr makes extensive use of MMapDirectory, which uses RAM _not_ reserved for the JVM for most of the Lucene index.
+Therefore, as much memory as possible should be left for the operating system to use for this purpose.
+* The heap allocated should be as small as possible while maintaining good performance.
+8-16 GB is quite common, and larger heaps are sometimes used.
+When heaps grow to larger sizes, it is imperative to test extensively before going to production.
 * The G1GC garbage collector is currently preferred when using a JVM that supports it (Java 9 and later).
-* Modern hardware can be configured with hundreds of gigabytes of physical RAM and many CPUs. It is often better in these cases to run multiple JVMs, each with a limited amount of memory allocated to their heaps. One way to achieve this is to run Solr as a https://hub.docker.com/_/solr?tab=tags[Docker container].
+* Modern hardware can be configured with hundreds of gigabytes of physical RAM and many CPUs.
+It is often better in these cases to run multiple JVMs, each with a limited amount of memory allocated to their heaps.
+One way to achieve this is to run Solr as a https://hub.docker.com/_/solr?tab=tags[Docker container].
 * It's good practice to periodically re-analyze the GC logs and/or monitor with <<metrics-reporting#metrics-reporting,Metrics Reporting>> to see if the memory usage has changed due to changes in your application, number of documents, etc.
-* On *nix systems, Solr will run with "OOM killer script" (see `solr/bin/oom_solr.sh`). This will forcefully stop Solr when the heap is exhausted rather than continue in an indeterminate state. You can additionally request a heap dump on OOM through the values in `solr.in.sh`
-* All current (Java 11) garbage collectors can hit "stop the world" collections, which suspend the JVM until completed. If, through monitoring, these garbage collections are frequent and greater than your application can tolerate, additional tuning should be considered. "Stop the world" pauses greater than 5 seconds are rarely acceptable, and having them be less than 1 second is desirable.
+* On *nix systems, Solr will run with an "OOM killer script" (see `solr/bin/oom_solr.sh`).
+This will forcefully stop Solr when the heap is exhausted rather than continue in an indeterminate state.
+You can additionally request a heap dump on OOM through the values in `solr.in.sh`.
+* All current (Java 11) garbage collectors can hit "stop the world" collections, which suspend the JVM until completed.
+If, through monitoring, these garbage collections prove frequent and longer than your application can tolerate, additional tuning should be considered.
+"Stop the world" pauses greater than 5 seconds are rarely acceptable, and having them be less than 1 second is desirable.
 
 Consult your JVM vendor's documentation for specifics in your particular case; the recommendations above are intended as starting points.
 
 == Use the Server HotSpot VM
 
-If you are using Sun's JVM, add the `-server` command-line option when you start Solr. This tells the JVM that it should optimize for a long running, server process. If the Java runtime on your system is a JRE, rather than a full JDK distribution (including `javac` and other development tools), then it is possible that it may not support the `-server` JVM option. Test this by running `java -help` and look for `-server` as an available option in the displayed usage message.
+If you are using Sun's JVM, add the `-server` command-line option when you start Solr.
+This tells the JVM that it should optimize for a long-running server process.
+If the Java runtime on your system is a JRE, rather than a full JDK distribution (including `javac` and other development tools), it may not support the `-server` JVM option.
+Test this by running `java -help` and looking for `-server` as an available option in the displayed usage message.
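+
+For example, on a *nix shell the check might look like this:
+
+[source,bash]
+----
+# Look for -server among the options listed in the JVM usage message
+java -help 2>&1 | grep -- -server
+----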
 
 == Checking JVM Settings
 
diff --git a/solr/solr-ref-guide/src/jwt-authentication-plugin.adoc b/solr/solr-ref-guide/src/jwt-authentication-plugin.adoc
index 8b6bc8e..9fa29b3 100644
--- a/solr/solr-ref-guide/src/jwt-authentication-plugin.adoc
+++ b/solr/solr-ref-guide/src/jwt-authentication-plugin.adoc
@@ -39,7 +39,7 @@ The simplest possible `security.json` for registering the plugin without configu
 
 The plugin will by default require a valid JWT token for all traffic.
 
-If the `blockUnknown` property is set to `false` as in the above example, it is possible to start configuring the plugin using unauthenticated REST API calls, which is further described in section <<editing-jwt-authentication-plugin-configuration,Editing JWT Authentication Plugin Configuration>>.
+If the `blockUnknown` property is set to `false` as in the above example, it is possible to start configuring the plugin using unauthenticated REST API calls, which is further described in section <<Editing JWT Authentication Plugin Configuration>>.
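+
+As a sketch, such a call could use the Authentication API's `set-property` command; the property and value below are only an example:
+
+[source,bash]
+----
+# Flip blockUnknown to true once the rest of the configuration is in place
+curl -X POST -H 'Content-type: application/json' \
+  -d '{"set-property": {"blockUnknown": true}}' \
+  http://localhost:8983/solr/admin/authentication
+----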
 
 == Configuration Parameters
 
@@ -66,7 +66,9 @@ issuers              ; List of issuers (Identity providers) to  support. See sec
 
 === Issuer Configuration
 
-This plugin supports one or more token issuers (IdPs). Issuers are configured as a list of JSON objects under the `issuers` configuration key. The first issuer in the list is the "Primary Issuer", which is the one used for logging in to the Admin UI.
+This plugin supports one or more token issuers (IdPs).
+Issuers are configured as a list of JSON objects under the `issuers` configuration key.
+The first issuer in the list is the "Primary Issuer", which is the one used for logging in to the Admin UI.
 
 [%header,format=csv,separator=;,cols="25%,50%,25%"]
 |===
@@ -180,9 +182,14 @@ However, in development, it may be useful to use regular HTTP URLs, and bypass t
 To support this you can set the environment variable `-Dsolr.auth.jwt.allowOutboundHttp=true` at startup.
 
 === Trusting the IdP server
-All communication with the Oauth2 server (IdP) is done over HTTPS. By default, Java's built-in TrustStore is used. However, by configuring one of the options `trustedCertsFile` or `trustedCerts`, the plugin will *instead* trust the set of certificates provided, not any certificate signed by a root CA. This is both more secure and also lets you trust self-signed certificates. It also has the benefit of working even if Solr is not started in SSL mode.
-
-Please configure either the `trustedCerts` or `trustedCertsFile` option. Configuring both will cause an error.
+All communication with the OAuth2 server (IdP) is done over HTTPS.
+By default, Java's built-in TrustStore is used.
+However, by configuring one of the options `trustedCertsFile` or `trustedCerts`, the plugin will *instead* trust the set of certificates provided, not any certificate signed by a root CA.
+This is more secure and also lets you trust self-signed certificates.
+It also has the benefit of working even if Solr is not started in SSL mode.
+
+Please configure either the `trustedCerts` or `trustedCertsFile` option.
+Configuring both will cause an error.
 
 == Editing JWT Authentication Plugin Configuration
 
diff --git a/solr/solr-ref-guide/src/kerberos-authentication-plugin.adoc b/solr/solr-ref-guide/src/kerberos-authentication-plugin.adoc
index a955368..a116419 100644
--- a/solr/solr-ref-guide/src/kerberos-authentication-plugin.adoc
+++ b/solr/solr-ref-guide/src/kerberos-authentication-plugin.adoc
@@ -101,7 +101,8 @@ We'll walk through each of these steps below.
 [IMPORTANT]
 ====
 To use host names instead of IP addresses, use the `SOLR_HOST` configuration in `bin/solr.in.sh` or pass a `-Dhost=<hostname>` system parameter during Solr startup.
-This guide uses IP addresses. If you specify a hostname, replace all the IP addresses in the guide with the Solr hostname as appropriate.
+This guide uses IP addresses.
+If you specify a hostname, replace all the IP addresses in the guide with the Solr hostname as appropriate.
 ====
 
 === Get Service Principals and Keytabs
@@ -445,8 +446,10 @@ NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see <<zookeep
 
 === Test the Configuration
 
-. Do a `kinit` with your username. For example, `kinit \user@EXAMPLE.COM`.
-. Try to access Solr using `curl`. You should get a successful response.
+. Do a `kinit` with your username.
+For example, `kinit \user@EXAMPLE.COM`.
+. Try to access Solr using `curl`.
+You should get a successful response.
 +
 [source,bash]
 ----
diff --git a/solr/solr-ref-guide/src/language-analysis.adoc b/solr/solr-ref-guide/src/language-analysis.adoc
index ab8f7c0..71f38cf 100644
--- a/solr/solr-ref-guide/src/language-analysis.adoc
+++ b/solr/solr-ref-guide/src/language-analysis.adoc
@@ -234,7 +234,8 @@ Unicode Collation in Solr is fast, because all the work is done at index time.
 Rather than specifying an analyzer within `<fieldtype ... class="solr.TextField">`, the `solr.CollationField` and `solr.ICUCollationField` field type classes provide this functionality.
 `solr.ICUCollationField`, which is backed by http://site.icu-project.org[the ICU4J library], provides more flexible configuration, has more locales, is significantly faster, and requires less memory and less index space, since its keys are smaller than those produced by the JDK implementation that backs `solr.CollationField`.
 
-To use `solr.ICUCollationField`, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).  See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use `solr.ICUCollationField`, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
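+
+As a rough sketch (exact jar names and locations vary by version, so treat the README above as authoritative; `mycore` is a placeholder core name), the jars can be copied into a core's `lib` directory:
+
+[source,bash]
+----
+# Make the analysis-extras jars and their ICU dependencies visible to one core
+mkdir -p server/solr/mycore/lib
+cp contrib/analysis-extras/lib/*.jar \
+   contrib/analysis-extras/lucene-libs/*.jar \
+   server/solr/mycore/lib/
+----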
 
 `solr.ICUCollationField` and `solr.CollationField` fields can be created in two ways:
 
@@ -248,28 +249,36 @@ Using a System collator:
 `locale`:: (required) http://www.rfc-editor.org/rfc/rfc3066.txt[RFC 3066] locale ID.
 See http://demo.icu-project.org/icu-bin/locexp[the ICU locale explorer] for a list of supported locales.
 
-`strength`:: Valid values are `primary`, `secondary`, `tertiary`, `quaternary`, or `identical`. See http://userguide.icu-project.org/collation/concepts#TOC-Comparison-Levels[Comparison Levels in ICU Collation Concepts] for more information.
+`strength`:: Valid values are `primary`, `secondary`, `tertiary`, `quaternary`, or `identical`.
+See http://userguide.icu-project.org/collation/concepts#TOC-Comparison-Levels[Comparison Levels in ICU Collation Concepts] for more information.
 
-`decomposition`:: Valid values are `no` or `canonical`. See http://userguide.icu-project.org/collation/concepts#TOC-Normalization[Normalization in ICU Collation Concepts] for more information.
+`decomposition`:: Valid values are `no` or `canonical`.
+See http://userguide.icu-project.org/collation/concepts#TOC-Normalization[Normalization in ICU Collation Concepts] for more information.
 
 Using a Tailored ruleset:
 
 `custom`:: (required) Path to a UTF-8 text file containing rules supported by the ICU http://icu-project.org/apiref/icu4j/com/ibm/icu/text/RuleBasedCollator.html[`RuleBasedCollator`]
 
-`strength`:: Valid values are `primary`, `secondary`, `tertiary`, `quaternary`, or `identical`. See http://userguide.icu-project.org/collation/concepts#TOC-Comparison-Levels[Comparison Levels in ICU Collation Concepts] for more information.
+`strength`:: Valid values are `primary`, `secondary`, `tertiary`, `quaternary`, or `identical`.
+See http://userguide.icu-project.org/collation/concepts#TOC-Comparison-Levels[Comparison Levels in ICU Collation Concepts] for more information.
 
-`decomposition`:: Valid values are `no` or `canonical`. See http://userguide.icu-project.org/collation/concepts#TOC-Normalization[Normalization in ICU Collation Concepts] for more information.
+`decomposition`:: Valid values are `no` or `canonical`.
+See http://userguide.icu-project.org/collation/concepts#TOC-Normalization[Normalization in ICU Collation Concepts] for more information.
 
 Expert options:
 
-`alternate`:: Valid values are `shifted` or `non-ignorable`. Can be used to ignore punctuation/whitespace.
+`alternate`:: Valid values are `shifted` or `non-ignorable`.
+Can be used to ignore punctuation/whitespace.
 
 `caseLevel`:: (true/false) If true, in combination with `strength="primary"`, accents are ignored but case is taken into account.
-The default is `false`. See http://userguide.icu-project.org/collation/concepts#TOC-CaseLevel[CaseLevel in ICU Collation Concepts] for more information.
+The default is `false`.
+See http://userguide.icu-project.org/collation/concepts#TOC-CaseLevel[CaseLevel in ICU Collation Concepts] for more information.
 
-`caseFirst`:: Valid values are `lower` or `upper`. Useful to control which is sorted first when case is not ignored.
+`caseFirst`:: Valid values are `lower` or `upper`.
+Useful to control which is sorted first when case is not ignored.
 
-`numeric`:: (true/false) If true, digits are sorted according to numeric value, e.g., foobar-9 sorts before foobar-10. The default is false.
+`numeric`:: (true/false) If true, digits are sorted according to numeric value, e.g., foobar-9 sorts before foobar-10.
+The default is false.
 
 `variableTop`:: Single character or contraction.
 Controls what is variable for `alternate`.
@@ -327,7 +336,8 @@ q=*:*&fl=city&sort=city_sort+asc
 
 === Sorting Text for Multiple Languages
 
-There are two approaches to supporting multiple languages: if there is a small list of languages you wish to support, consider defining collated fields for each language and using `copyField`. However, adding a large number of sort fields can increase disk and indexing costs.
+There are two approaches to supporting multiple languages.
+If there is a small list of languages you wish to support, consider defining collated fields for each language and using `copyField`.
+However, adding a large number of sort fields can increase disk and indexing costs.
 An alternative approach is to use the Unicode `default` collator.
 
 The Unicode `default` or `ROOT` locale has rules that are designed to work well for most languages.
@@ -346,7 +356,8 @@ This Unicode default sort is still significantly more advanced than the standard
 You can define your own set of sorting rules.
 It's easiest to take existing rules that are close to what you want and customize them.
 
-In the example below, we create a custom rule set for German called DIN 5007-2. This rule set treats umlauts in German differently: it treats ö as equivalent to oe, ä as equivalent to ae, and ü as equivalent to ue.
+In the example below, we create a custom rule set for German called DIN 5007-2.
+This rule set treats umlauts in German differently: it treats ö as equivalent to oe, ä as equivalent to ae, and ü as equivalent to ue.
 For more information, see the http://icu-project.org/apiref/icu4j/com/ibm/icu/text/RuleBasedCollator.html[ICU RuleBasedCollator javadocs].
 
 This example shows how to create a custom rule set for `solr.ICUCollationField` and dump it to a file:
@@ -398,17 +409,21 @@ Using a System collator (see http://www.oracle.com/technetwork/java/javase/java8
 
 `variant`:: Vendor or browser-specific code
 
-`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
+`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`.
+See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
-`decomposition`:: Valid values are `no`, `canonical`, or `full`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
+`decomposition`:: Valid values are `no`, `canonical`, or `full`.
+See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
 Using a Tailored ruleset:
 
 `custom`:: (required) Path to a UTF-8 text file containing rules supported by the {java-javadocs}java/text/RuleBasedCollator.html[`JDK RuleBasedCollator`]
 
-`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
+`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`.
+See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
-`decomposition`:: Valid values are `no`, `canonical`, or `full`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
+`decomposition`:: Valid values are `no`, `canonical`, or `full`.
+See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
 .A `solr.CollationField` example:
 [source,xml]
@@ -517,7 +532,8 @@ In addition to these analysis components, Solr also provides an update request p
 
 NOTE: The <<OpenNLP Tokenizer>> must be used with all other OpenNLP analysis components, for two reasons: first, the OpenNLP Tokenizer detects and marks the sentence boundaries required by all the OpenNLP filters; and second, since the pre-trained OpenNLP models used by these filters were trained using the corresponding language-specific sentence-detection/tokenization models, the same tokenization, using the same models, must be used at runtime for optimal performance.
 
-To use the OpenNLP components, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use the OpenNLP components, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 === OpenNLP Tokenizer
 
@@ -951,7 +967,8 @@ They use the Lucene classes `org.apache.lucene.analysis.bn.BengaliNormalizationF
 === Brazilian Portuguese
 
 This is a Java filter written specifically for stemming the Brazilian dialect of the Portuguese language.
-It uses the Lucene class `org.apache.lucene.analysis.br.BrazilianStemmer`. Although that stemmer can be configured to use a list of protected words (which should not be stemmed), this factory does not accept any arguments to specify such a list.
+It uses the Lucene class `org.apache.lucene.analysis.br.BrazilianStemmer`.
+Although that stemmer can be configured to use a list of protected words (which should not be stemmed), this factory does not accept any arguments to specify such a list.
 
 *Factory class:* `solr.BrazilianStemFilterFactory`
 
@@ -1031,7 +1048,8 @@ Solr includes a light stemmer for Bulgarian, following http://members.unine.ch/j
 
 === Catalan
 
-Solr can stem Catalan using the Snowball Porter Stemmer with an argument of `language="Catalan"`. Solr includes a set of contractions for Catalan, which can be stripped using `solr.ElisionFilterFactory`.
+Solr can stem Catalan using the Snowball Porter Stemmer with an argument of `language="Catalan"`.
+Solr includes a set of contractions for Catalan, which can be stripped using `solr.ElisionFilterFactory`.
 
 *Factory class:* `solr.SnowballPorterFilterFactory`
 
@@ -1083,7 +1101,8 @@ Solr can stem Catalan using the Snowball Porter Stemmer with an argument of `lan
 
 The default configuration of the <<tokenizers.adoc#icu-tokenizer,ICU Tokenizer>> is suitable for Traditional Chinese text.
 It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
 
 <<tokenizers.adoc#standard-tokenizer,Standard Tokenizer>> can also be used to tokenize Traditional Chinese text.
 Following the Word Break rules from the Unicode Text Segmentation algorithm, it produces one token per Chinese character.
@@ -1135,7 +1154,8 @@ When combined with <<CJK Bigram Filter>>, overlapping bigrams of Chinese charact
 
 Forms bigrams (overlapping 2-character sequences) of CJK characters that are generated from <<tokenizers.adoc#standard-tokenizer,Standard Tokenizer>> or <<tokenizers.adoc#icu-tokenizer,ICU Tokenizer>>.
 
-By default, all CJK characters produce bigrams, but finer grained control is available by specifying orthographic type arguments `han`, `hiragana`, `katakana`, and `hangul`.  When set to `false`, characters of the corresponding type will be passed through as unigrams, and will not be included in any bigrams.
+By default, all CJK characters produce bigrams, but finer-grained control is available by specifying orthographic type arguments `han`, `hiragana`, `katakana`, and `hangul`.
+When set to `false`, characters of the corresponding type will be passed through as unigrams, and will not be included in any bigrams.
 
 When a CJK character has no adjacent characters to form a bigram, it is output in unigram form.
 If you want to always output both unigrams and bigrams, set the `outputUnigrams` argument to `true`.
@@ -1164,11 +1184,13 @@ See the example under <<Traditional Chinese>>.
 === Simplified Chinese
 
 For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the <<HMM Chinese Tokenizer>>. This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
 
 The default configuration of the <<tokenizers.adoc#icu-tokenizer,ICU Tokenizer>> is also suitable for Simplified Chinese text.
 It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
 
 Also useful for Chinese analysis:
 
@@ -1225,7 +1247,8 @@ Also useful for Chinese analysis:
 
 For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the `solr.HMMChineseTokenizerFactory` in the `analysis-extras` contrib module.
 This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).  See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 *Factory class:* `solr.HMMChineseTokenizerFactory`
 
@@ -1479,7 +1502,8 @@ This filter can be useful for languages such as French, Catalan, Italian, and Ir
 *Arguments:*
 
 `articles`:: The pathname of a file that contains a list of articles, one per line, to be stripped.
-Articles are words such as "le", which are commonly abbreviated, such as in _l'avion_ (the plane). This file should include the abbreviated form, which precedes the apostrophe.
+Articles are words such as "le", which are commonly abbreviated, such as in _l'avion_ (the plane).
+This file should include the abbreviated form, which precedes the apostrophe.
 In this case, simply "_l_". If no `articles` attribute is specified, a default set of French articles is used.
 
 `ignoreCase`:: (boolean) If true, the filter ignores the case of words when comparing them to the common word file.
@@ -1525,7 +1549,8 @@ Defaults to `false`
 
 ==== French Light Stem Filter
 
-Solr includes three stemmers for French: one in the `solr.SnowballPorterFilterFactory`, a lighter stemmer called `solr.FrenchLightStemFilterFactory`, and an even less aggressive stemmer called `solr.FrenchMinimalStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes three stemmers for French: one in the `solr.SnowballPorterFilterFactory`, a lighter stemmer called `solr.FrenchLightStemFilterFactory`, and an even less aggressive stemmer called `solr.FrenchMinimalStemFilterFactory`.
+Lucene includes an example stopword list.
 
 *Factory classes:* `solr.FrenchLightStemFilterFactory`, `solr.FrenchMinimalStemFilterFactory`
 
@@ -1608,7 +1633,8 @@ Solr includes a stemmer for Galician following http://bvg.udc.es/recursos_lingua
 
 === German
 
-Solr includes four stemmers for German: one in the `solr.SnowballPorterFilterFactory language="German"`, a stemmer called `solr.GermanStemFilterFactory`, a lighter stemmer called `solr.GermanLightStemFilterFactory`, and an even less aggressive stemmer called `solr.GermanMinimalStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes four stemmers for German: one in the `solr.SnowballPorterFilterFactory language="German"`, a stemmer called `solr.GermanStemFilterFactory`, a lighter stemmer called `solr.GermanLightStemFilterFactory`, and an even less aggressive stemmer called `solr.GermanMinimalStemFilterFactory`.
+Lucene includes an example stopword list.
 
 *Factory classes:* `solr.GermanStemFilterFactory`, `solr.LightGermanStemFilterFactory`, `solr.MinimalGermanStemFilterFactory`
 
@@ -1674,7 +1700,8 @@ This filter converts uppercase letters in the Greek character set to the equival
 
 [IMPORTANT]
 ====
-Use of custom charsets is no longer supported as of Solr 3.1. If you need to index text in these encodings, please use Java's character set conversion facilities (InputStreamReader, etc.) during I/O, so that Lucene can analyze this text as Unicode instead.
+Use of custom charsets is no longer supported as of Solr 3.1.
+If you need to index text in these encodings, please use Java's character set conversion facilities (InputStreamReader, etc.) during I/O, so that Lucene can analyze this text as Unicode instead.
 ====
 
 *Example:*
@@ -1791,7 +1818,8 @@ Solr includes support for stemming Indonesian (Bahasa Indonesia) following http:
 
 === Italian
 
-Solr includes two stemmers for Italian: one in the `solr.SnowballPorterFilterFactory language="Italian"`, and a lighter stemmer called `solr.ItalianLightStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes two stemmers for Italian: one in the `solr.SnowballPorterFilterFactory language="Italian"`, and a lighter stemmer called `solr.ItalianLightStemFilterFactory`.
+Lucene includes an example stopword list.
 
 *Factory class:* `solr.ItalianStemFilterFactory`
 
@@ -1839,7 +1867,8 @@ Solr includes two stemmers for Italian: one in the `solr.SnowballPorterFilterFac
 
 === Irish
 
-Solr can stem Irish using the Snowball Porter Stemmer with an argument of `language="Irish"`. Solr includes `solr.IrishLowerCaseFilterFactory`, which can handle Irish-specific constructs.
+Solr can stem Irish using the Snowball Porter Stemmer with an argument of `language="Irish"`.
+Solr includes `solr.IrishLowerCaseFilterFactory`, which can handle Irish-specific constructs.
 Solr also includes a set of contractions for Irish which can be stripped using `solr.ElisionFilterFactory`.
 
 *Factory class:* `solr.SnowballPorterFilterFactory`
@@ -1946,7 +1975,8 @@ See `lang/userdict_ja.txt` for a sample user dictionary file.
 
 ==== Japanese Base Form Filter
 
-Replaces original terms' text with the corresponding base form (lemma). (`JapaneseTokenizer` annotates each term with its base form.)
+Replaces original terms' text with the corresponding base form (lemma).
+(`JapaneseTokenizer` annotates each term with its base form.)
 
 *Factory class:* `JapaneseBaseFormFilterFactory`
 
@@ -2037,7 +2067,8 @@ Example:
 === Hebrew, Lao, Myanmar, Khmer
 
 Lucene provides support, in addition to UAX#29 word break rules, for Hebrew's use of the double and single quote characters, and for segmenting Lao, Myanmar, and Khmer into syllables with the `solr.ICUTokenizerFactory` in the `analysis-extras` contrib module.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).  See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 See <<tokenizers.adoc#icu-tokenizer,the ICUTokenizer>> for more information.
 
@@ -2091,7 +2122,8 @@ Solr includes support for stemming Latvian, and Lucene includes an example stopw
 
 === Norwegian
 
-Solr includes two classes for stemming Norwegian, `NorwegianLightStemFilterFactory` and `NorwegianMinimalStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes two classes for stemming Norwegian, `NorwegianLightStemFilterFactory` and `NorwegianMinimalStemFilterFactory`.
+Lucene includes an example stopword list.
 
 Another option is to use the Snowball Porter Stemmer with an argument of language="Norwegian".
 
@@ -2294,7 +2326,8 @@ Solr includes support for normalizing Persian, and Lucene includes an example st
 
 Solr provides support for Polish stemming with the `solr.StempelPolishStemFilterFactory`, and `solr.MorphologikFilterFactory` for lemmatization, in the `contrib/analysis-extras` module.
 The `solr.StempelPolishStemFilterFactory` component includes an algorithmic stemmer with tables for Polish.
-To use either of these filters, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use either of these filters, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 *Factory class:* `solr.StempelPolishStemFilterFactory` and `solr.MorfologikFilterFactory`
 
@@ -2356,7 +2389,8 @@ If the dictionary attribute is not provided, the Polish dictionary is loaded and
 
 === Portuguese
 
-Solr includes four stemmers for Portuguese: one in the `solr.SnowballPorterFilterFactory`, an alternative stemmer called `solr.PortugueseStemFilterFactory`, a lighter stemmer called `solr.PortugueseLightStemFilterFactory`, and an even less aggressive stemmer called `solr.PortugueseMinimalStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes four stemmers for Portuguese: one in the `solr.SnowballPorterFilterFactory`, an alternative stemmer called `solr.PortugueseStemFilterFactory`, a lighter stemmer called `solr.PortugueseLightStemFilterFactory`, and an even less aggressive stemmer called `solr.PortugueseMinimalStemFilterFactory`.
+Lucene includes an example stopword list.
 
 *Factory classes:* `solr.PortugueseStemFilterFactory`, `solr.PortugueseLightStemFilterFactory`, `solr.PortugueseMinimalStemFilterFactory`
 
@@ -2460,7 +2494,8 @@ Solr can stem Romanian using the Snowball Porter Stemmer with an argument of `la
 
 ==== Russian Stem Filter
 
-Solr includes two stemmers for Russian: one in the `solr.SnowballPorterFilterFactory language="Russian"`, and a lighter stemmer called `solr.RussianLightStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes two stemmers for Russian: one in the `solr.SnowballPorterFilterFactory language="Russian"`, and a lighter stemmer called `solr.RussianLightStemFilterFactory`.
+Lucene includes an example stopword list.
 
 *Factory class:* `solr.RussianLightStemFilterFactory`
 
@@ -2635,7 +2670,8 @@ Valid values are:
 
 === Spanish
 
-Solr includes two stemmers for Spanish: one in the `solr.SnowballPorterFilterFactory language="Spanish"`, and a lighter stemmer called `solr.SpanishLightStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes two stemmers for Spanish: one in the `solr.SnowballPorterFilterFactory language="Spanish"`, and a lighter stemmer called `solr.SpanishLightStemFilterFactory`.
+Lucene includes an example stopword list.
 
 *Factory class:* `solr.SpanishStemFilterFactory`
 
@@ -2682,7 +2718,8 @@ Solr includes two stemmers for Spanish: one in the `solr.SnowballPorterFilterFac
 
 ==== Swedish Stem Filter
 
-Solr includes two stemmers for Swedish: one in the `solr.SnowballPorterFilterFactory language="Swedish"`, and a lighter stemmer called `solr.SwedishLightStemFilterFactory`. Lucene includes an example stopword list.
+Solr includes two stemmers for Swedish: one in the `solr.SnowballPorterFilterFactory language="Swedish"`, and a lighter stemmer called `solr.SwedishLightStemFilterFactory`.
+Lucene includes an example stopword list.
 
 Also relevant are the <<Scandinavian,Scandinavian normalization filters>>.
 
@@ -2821,7 +2858,8 @@ Solr includes support for stemming Turkish with the `solr.SnowballPorterFilterFa
 === Ukrainian
 
 Solr provides support for Ukrainian lemmatization with the `solr.MorphologikFilterFactory`, in the `contrib/analysis-extras` module.
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>). See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 Lucene also includes an example Ukrainian stopword list, in the `lucene-analyzers-morfologik` jar.
 
diff --git a/solr/solr-ref-guide/src/language-detection.adoc b/solr/solr-ref-guide/src/language-detection.adoc
index 3383ebd..70ba97d 100644
--- a/solr/solr-ref-guide/src/language-detection.adoc
+++ b/solr/solr-ref-guide/src/language-detection.adoc
@@ -33,7 +33,8 @@ For more information about language analysis in Solr, see <<language-analysis.ad
 
 == Configuring Language Detection
 
-You can configure the `langid` UpdateRequestProcessor in `solrconfig.xml`. Both implementations take the same parameters, which are described in the following section.
+You can configure the `langid` UpdateRequestProcessor in `solrconfig.xml`.
+Both implementations take the same parameters, which are described in the following section.
 At a minimum, you must specify the fields for language identification and a field for the resulting language code.
 
 === Configuring Tika Language Detection
@@ -100,7 +101,8 @@ As previously mentioned, both implementations of the `langid` UpdateRequestProce
 When `true`, the default, enables language detection.
 
 `langid.fl`::
-A comma- or space-delimited list of fields to be processed by `langid`. This parameter is required.
+A comma- or space-delimited list of fields to be processed by `langid`.
+This parameter is required.
 
 `langid.langField`::
 Specifies the field for the returned language code.
@@ -136,7 +138,8 @@ Use this in combination with `langid.map` to ensure that you only index document
 
 `langid.map`::
 Enables field name mapping.
-If `true`, Solr will map field names for all fields listed in `langid.fl`. The default is `false`.
+If `true`, Solr will map field names for all fields listed in `langid.fl`.
+The default is `false`.
 
 `langid.map.fl`::
 A comma-separated list of fields for `langid.map` that is different than the fields specified in `langid.fl`.
@@ -171,7 +174,8 @@ A list defined with this parameter will override any configuration set with `lan
 By default, fields are mapped as <field>_<language>. To change this pattern, you can specify a Java regular expression in this parameter.
 
 `langid.map.replace`::
-By default, fields are mapped as `<field>_<language>`. To change this pattern, you can specify a Java replace in this parameter.
+By default, fields are mapped as `<field>_<language>`.
+To change this pattern, you can specify a Java replace in this parameter.
 
 `langid.enforceSchema`::
 If `false`, the `langid` processor does not validate field names against your schema.
diff --git a/solr/solr-ref-guide/src/learning-to-rank.adoc b/solr/solr-ref-guide/src/learning-to-rank.adoc
index 3786985..baf5a0a 100644
--- a/solr/solr-ref-guide/src/learning-to-rank.adoc
+++ b/solr/solr-ref-guide/src/learning-to-rank.adoc
@@ -18,21 +18,25 @@
 
 With the *Learning To Rank* (or *LTR* for short) contrib module you can configure and run machine learned ranking models in Solr.
 
-The module also supports feature extraction inside Solr. The only thing you need to do outside Solr is train your own ranking model.
+The module also supports feature extraction inside Solr.
+The only thing you need to do outside Solr is train your own ranking model.
 
 == Learning to Rank Concepts
 
 === Re-Ranking
 
-Re-Ranking allows you to run a simple query for matching documents and then re-rank the top N documents using the scores from a different, more complex query. This page describes the use of *LTR* complex queries, information on other rank queries included in the Solr distribution can be found on the <<query-re-ranking.adoc#,Query Re-Ranking>> page.
+Re-Ranking allows you to run a simple query for matching documents and then re-rank the top N documents using the scores from a different, more complex query.
+This page describes the use of *LTR* complex queries; information on other rank queries included in the Solr distribution can be found on the <<query-re-ranking.adoc#,Query Re-Ranking>> page.
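+
+As a sketch of what such a re-ranking request can look like (the collection and model names are illustrative, and the model must already be uploaded):
+
+[source,bash]
+----
+# Re-rank the top 100 documents of a simple query with an LTR model
+# -g stops curl from interpreting the {} characters in the local params
+curl -g "http://localhost:8983/solr/techproducts/query?q=test&fl=id,score&rq={!ltr%20model=myModel%20reRankDocs=100}"
+----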
 
 === Learning To Rank Models
 
-In information retrieval systems, https://en.wikipedia.org/wiki/Learning_to_rank[Learning to Rank] is used to re-rank the top N retrieved documents using trained machine learning models. The hope is that such sophisticated models can make more nuanced ranking decisions than standard ranking functions like https://en.wikipedia.org/wiki/Tf%E2%80%93idf[TF-IDF] or https://en.wikipedia.org/wiki/Okapi_BM25[BM25].
+In information retrieval systems, https://en.wikipedia.org/wiki/Learning_to_rank[Learning to Rank] is used to re-rank the top N retrieved documents using trained machine learning models.
+The hope is that such sophisticated models can make more nuanced ranking decisions than standard ranking functions like https://en.wikipedia.org/wiki/Tf%E2%80%93idf[TF-IDF] or https://en.wikipedia.org/wiki/Okapi_BM25[BM25].
 
 ==== Ranking Model
 
-A ranking model computes the scores used to rerank documents. Irrespective of any particular algorithm or implementation, a ranking model's computation can use three types of inputs:
+A ranking model computes the scores used to rerank documents.
+Irrespective of any particular algorithm or implementation, a ranking model's computation can use three types of inputs:
 
 * parameters that represent the scoring algorithm
 * features that represent the document being scored
@@ -47,17 +51,21 @@ Interleaving is an approach to Online Search Quality evaluation that allows to c
 
 ==== Feature
 
-A feature is a value, a number, that represents some quantity or quality of the document being scored or of the query for which documents are being scored. For example documents often have a 'recency' quality and 'number of past purchases' might be a quantity that is passed to Solr as part of the search query.
+A feature is a value, a number, that represents some quantity or quality of the document being scored or of the query for which documents are being scored.
+For example, documents often have a 'recency' quality, and 'number of past purchases' might be a quantity that is passed to Solr as part of the search query.
 
 ==== Normalizer
 
-Some ranking models expect features on a particular scale. A normalizer can be used to translate arbitrary feature values into normalized values e.g., on a 0..1 or 0..100 scale.
+Some ranking models expect features on a particular scale.
+A normalizer can be used to translate arbitrary feature values into normalized values e.g., on a 0..1 or 0..100 scale.
 
 === Training Models
 
 ==== Feature Engineering
 
-The LTR contrib module includes several feature classes as well as support for custom features. Each feature class's javadocs contain an example to illustrate use of that class. The process of https://en.wikipedia.org/wiki/Feature_engineering[feature engineering] itself is then entirely up to your domain expertise and creativity.
+The LTR contrib module includes several feature classes as well as support for custom features.
+Each feature class's javadocs contain an example to illustrate use of that class.
+The process of https://en.wikipedia.org/wiki/Feature_engineering[feature engineering] itself is then entirely up to your domain expertise and creativity.
 
 [cols=",,,",options="header",]
 |===
@@ -87,7 +95,10 @@ The ltr contrib module includes a <<document-transformers.adoc#,[features>> tran
 
 ==== Feature Selection and Model Training
 
-Feature selection and model training take place offline and outside Solr. The ltr contrib module supports two generalized forms of models as well as custom models. Each model class's javadocs contain an example to illustrate configuration of that class. In the form of JSON files your trained model or models (e.g., different models for different customer geographies) can then be directly uploaded into Solr using provided REST APIs.
+Feature selection and model training take place offline and outside Solr.
+The ltr contrib module supports two generalized forms of models as well as custom models.
+Each model class's javadocs contain an example to illustrate configuration of that class.
+In the form of JSON files, your trained model or models (e.g., different models for different customer geographies) can then be uploaded directly into Solr using the provided REST APIs.
 
 [cols=",,",options="header",]
 |===
@@ -461,7 +472,8 @@ Read more about model evolution in the <<LTR Lifecycle>> section of this page.
 
 === Training Example
 
-Example training data and a demo `train_and_upload_demo_model.py` script can be found in the `solr/contrib/ltr/example` folder in the https://gitbox.apache.org/repos/asf?p=lucene-solr.git;a=tree;f=solr/contrib/ltr/example[Apache lucene-solr Git repository] (mirrored on https://github.com/apache/lucene-solr/tree/releases/lucene-solr/{solr-docs-version}.0/solr/contrib/ltr/example[github.com]). This example folder is not shipped in the Solr binary release.
+Example training data and a demo `train_and_upload_demo_model.py` script can be found in the `solr/contrib/ltr/example` folder in the https://gitbox.apache.org/repos/asf?p=lucene-solr.git;a=tree;f=solr/contrib/ltr/example[Apache lucene-solr Git repository] (mirrored on https://github.com/apache/lucene-solr/tree/releases/lucene-solr/{solr-docs-version}.0/solr/contrib/ltr/example[github.com]).
+This example folder is not shipped in the Solr binary release.
 
 == Installation of LTR
 
@@ -473,7 +485,8 @@ Learning-To-Rank is a contrib module and therefore its plugins must be configure
 
 === Minimum Requirements
 
-* Include the required contrib JARs. Note that by default paths are relative to the Solr core so they may need adjustments to your configuration, or an explicit specification of the `$solr.install.dir`.
+* Include the required contrib JARs.
+Note that by default paths are relative to the Solr core so they may need adjustments to your configuration, or an explicit specification of the `$solr.install.dir`.
 +
 [source,xml]
 ----
@@ -519,7 +532,8 @@ Learning-To-Rank is a contrib module and therefore its plugins must be configure
 
 ==== LTRThreadModule
 
-A thread module can be configured for the query parser and/or the transformer to parallelize the creation of feature weights. For details, please refer to the {solr-javadocs}/contrib/ltr/org/apache/solr/ltr/LTRThreadModule.html[LTRThreadModule] javadocs.
+A thread module can be configured for the query parser and/or the transformer to parallelize the creation of feature weights.
+For details, please refer to the {solr-javadocs}/contrib/ltr/org/apache/solr/ltr/LTRThreadModule.html[LTRThreadModule] javadocs.
 
 ==== Feature Vector Customization
 
@@ -543,7 +557,8 @@ How does Solr Learning-To-Rank work under the hood?::
 Please refer to the `ltr` {solr-javadocs}/contrib/ltr/org/apache/solr/ltr/package-summary.html[javadocs] for an implementation overview.
 
 How could I write additional models and/or features?::
-Contributions for further models, features, normalizers and interleaving algorithms are welcome. Related links:
+Contributions for further models, features, normalizers and interleaving algorithms are welcome.
+Related links:
 +
 * {solr-javadocs}/contrib/ltr/org/apache/solr/ltr/model/LTRScoringModel.html[LTRScoringModel javadocs]
 * {solr-javadocs}/contrib/ltr/org/apache/solr/ltr/feature/Feature.html[Feature javadocs]
@@ -612,7 +627,8 @@ curl -XDELETE 'http://localhost:8983/solr/techproducts/schema/feature-store/curr
 
 ==== Using Large Models
 
-With SolrCloud, large models may fail to upload due to the limitation of ZooKeeper's buffer. In this case, `DefaultWrapperModel` may help you to separate the model definition from uploaded file.
+With SolrCloud, large models may fail to upload due to the limitation of ZooKeeper's buffer.
+In this case, `DefaultWrapperModel` may help you to separate the model definition from the uploaded file.
 
 Assume that you want to use a large model placed at `/path/to/models/myModel.json` through `DefaultWrapperModel`.
 
diff --git a/solr/solr-ref-guide/src/loading.adoc b/solr/solr-ref-guide/src/loading.adoc
index 86bfa26..6f3454a 100644
--- a/solr/solr-ref-guide/src/loading.adoc
+++ b/solr/solr-ref-guide/src/loading.adoc
@@ -22,7 +22,8 @@ These functions are designed to cut down the time spent on data preparation and
 == Reading Files
 
 The `cat` function can be used to read files under the *userfiles* directory in
-`$SOLR_HOME`. The `cat` function takes two parameters.
+`$SOLR_HOME`.
+The `cat` function takes two parameters.
 
 The first parameter is a comma-delimited list of paths.
 If the path list contains directories, `cat` will crawl all the files in the directory and sub-directories.
@@ -81,12 +82,10 @@ When this expression is sent to the `/stream` handler it responds with:
 
 == Parsing CSV and TSV Files
 
-The `parseCSV` and `parseTSV` functions wrap the `cat` function and parse CSV
-(comma separated values) and TSV (tab separated values). Both of these functions
-expect a CSV or TSV header record at the beginning of each file.
+The `parseCSV` and `parseTSV` functions wrap the `cat` function and parse CSV (comma separated values) and TSV (tab separated values).
+Both of these functions expect a CSV or TSV header record at the beginning of each file.
 
-Both `parseCSV` and `parseTSV` emit tuples with the header values mapped to their
-corresponding values in each line.
+Both `parseCSV` and `parseTSV` emit tuples with the header values mapped to their corresponding values in each line.
 
 
 [source,text]
@@ -144,28 +143,24 @@ When this expression is sent to the `/stream` handler it responds with:
 
 == Visualizing
 
-Once that data has been parsed into tuples with `parseCSV` or `parseTSV` it can be
-visualized using Zeppelin-Solr.
+Once that data has been parsed into tuples with `parseCSV` or `parseTSV` it can be visualized using Zeppelin-Solr.
 
 The example below shows the output of the `parseCSV` function visualized as a table.
 
 image::images/math-expressions/csvtable.png[]
 
 Columns from the table can then be visualized using one of Apache Zeppelin's
-visualizations. The example below shows a scatter plot of the `petal_length` and `petal_width`
-grouped by `species`.
+visualizations.
+The example below shows a scatter plot of the `petal_length` and `petal_width` grouped by `species`.
 
 image::images/math-expressions/csv.png[]
 
 == Selecting Fields and Field Types
 
-The `select` function can be used to select specific fields from
-the CSV file and map them to new field names for indexing.
+The `select` function can be used to select specific fields from the CSV file and map them to new field names for indexing.
 
-Fields in the CSV file can be mapped to field names with
-dynamic field suffixes. This approach allows for fine grain
-control over schema field types without having to make any
-changes to schema files.
+Fields in the CSV file can be mapped to field names with dynamic field suffixes.
+This approach allows for fine-grained control over schema field types without having to make any changes to schema files.
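+
+As a rough sketch (the file, field, and collection names here are hypothetical), such an expression sent directly to the `/stream` handler could look like:
+
+[source,bash]
+----
+# Map raw CSV columns onto dynamic-field-typed names while reading the file
+curl --data-urlencode 'expr=select(parseCSV(cat("iris.csv")),
+                                   id,
+                                   petal_length as petal_length_d,
+                                   petal_width as petal_width_d,
+                                   species as species_s)' \
+  http://localhost:8983/solr/COLLECTION_NAME/stream
+----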
 
 Below is an example of selecting fields and mapping them
 to specific field types.
@@ -192,16 +187,15 @@ The section below shows some useful transformations that can be applied while an
 === Unique IDs
 
 Both `parseCSV` and `parseTSV` emit an *id* field if one is not present in the data already.
-The *id* field is a concatenation of the file path and the line number. This is a
-convenient way to ensure that records have consistent ids if an id
+The *id* field is a concatenation of the file path and the line number.
+This is a convenient way to ensure that records have consistent ids if an id
 is not present in the file.
 
 You can also map any fields in the file to the id field using the `select` function.
 The `concat` function can be used to concatenate two or more fields in the file
-to create an id. Or the `uuid` function can be used to create a random unique id. If
-the `uuid` function is used the data cannot be reloaded without first deleting
-the data, as the `uuid` function does not produce the same id for each document
-on subsequent loads.
+to create an id.
+Alternatively, the `uuid` function can be used to create a random unique id.
+If the `uuid` function is used the data cannot be reloaded without first deleting the data, as the `uuid` function does not produce the same id for each document on subsequent loads.
 
 Below is an example using the `concat` function to create a new id.
 
@@ -213,9 +207,8 @@ image::images/math-expressions/selectuuid.png[]
 
 === Record Numbers
 
-The `recNum` function can be used inside of a `select` function to add a record number
-to each tuple. The record number is useful for tracking location in the result set
-and can be used for filtering strategies such as skipping, paging and striding described in
+The `recNum` function can be used inside of a `select` function to add a record number to each tuple.
+The record number is useful for tracking location in the result set and can be used for filtering strategies such as skipping, paging and striding described in
 the <<Filtering Results>> section below.
 
 The example below shows the syntax of the `recNum` function:
@@ -260,8 +253,8 @@ When this expression is sent to the `/stream` handler it responds with:
 Then we can use the `dateTime` function to format the datetime and
 map it to a Solr date field.
 
-The `dateTime` function takes three parameters. The field in the data
-with the date string, a template to parse the date using a Java https://docs.oracle.com/javase/9/docs/api/java/text/SimpleDateFormat.html[`SimpleDateFormat` template],
+The `dateTime` function takes three parameters.
+These are the field in the data with the date string, a template to parse the date using a Java https://docs.oracle.com/javase/9/docs/api/java/text/SimpleDateFormat.html[`SimpleDateFormat` template],
 and an optional time zone.
 
 If the time zone is not present the time zone defaults to GMT time unless
@@ -299,8 +292,7 @@ When this expression is sent to the `/stream` handler it responds with:
 
 === String Manipulation
 
-The `upper`, `lower`, `split`, `valueAt`, `trim`, and `concat` functions can be used to manipulate
-strings inside of the `select` function.
+The `upper`, `lower`, `split`, `valueAt`, `trim`, and `concat` functions can be used to manipulate strings inside of the `select` function.
 
 The example below shows the `upper` function used to upper case the *species*
 field.
@@ -308,8 +300,8 @@ field.
 image::images/math-expressions/selectupper.png[]
 
 The example below shows the `split` function which splits a field on
-a delimiter. This can be used to create multi-value fields from fields
-with an internal delimiter.
+a delimiter.
+This can be used to create multi-value fields from fields with an internal delimiter.
 
 The example below demonstrates this with a direct call to
 the `/stream` handler:
@@ -360,8 +352,7 @@ When this expression is sent to the `/stream` handler it responds with:
       }]}}
 ----
 
-The `valueAt` function can be used to select a specific index from
-a split array.
+The `valueAt` function can be used to select a specific index from a split array.
 
 image::images/math-expressions/valueat.png[]
 
@@ -401,22 +392,21 @@ image::images/math-expressions/paging.png[]
 ==== Striding
 
 The `eq` and nested `mod` functions can be used to stride through the data at specific
-record number intervals. This allows for a sample to be taken at different intervals in the data
-in a systematic way.
+record number intervals.
+This allows for a sample to be taken at different intervals in the data in a systematic way.
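+
+One possible shape for this, assuming the record number was added as a hypothetical `rec_i` field with the `recNum` function, is sketched below; the screenshot that follows shows the same idea:
+
+[source,bash]
+----
+# Keep only every 5th record by testing the record number's remainder
+curl --data-urlencode 'expr=having(select(parseCSV(cat("iris.csv")),
+                                          recNum() as rec_i,
+                                          species as species_s),
+                                   eq(mod(rec_i, 5), 0))' \
+  http://localhost:8983/solr/COLLECTION_NAME/stream
+----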
 
 image::images/math-expressions/striding.png[]
 
 ==== Regex Matching
 
-The `matches` function can be used to test if a field in the record matches a specific
-regular expression. This provides a powerful *grep* like capability over the record set.
+The `matches` function can be used to test if a field in the record matches a specific regular expression.
+This provides a powerful *grep*-like capability over the record set.
 
 image::images/math-expressions/matches.png[]
 
 === Handling Nulls
 
-In most cases nulls do not need to be handled directly unless there is specific logic needed
-to handle nulls during the load.
+In most cases nulls do not need to be handled directly unless there is specific logic needed to handle nulls during the load.
 
 The `select` function does not output fields that contain a null value.
 This means as nulls are encountered in the data the fields are not included in the tuples.
@@ -465,19 +455,15 @@ function can be used to expand the list of tokens to a stream of tuples.
 There are a number of interesting use cases for the `analyze` function:
 
 * Previewing the output of different analyzers before indexing.
-* Annotating documents with NLP generated tokens (entity extraction, noun phrases etc...)
-before the documents reach the indexing pipeline.
-This removes heavy NLP processing from the servers that may also be handling queries. It also allows
-more compute resources to be applied to the NLP indexing then is available on the search cluster.
-* Using the `cartesianProduct` function the analyzed tokens can be indexed as individual documents which allows
-analyzed tokens to be searched and analyzed with Solr's aggregation and graph expressions.
-* Also using `cartesianProduct` the analyzed tokens can be aggregated, analyzed and visualized using
-streaming expressions directly before indexing occurs.
+* Annotating documents with NLP generated tokens (entity extraction, noun phrases etc...) before the documents reach the indexing pipeline.
+This removes heavy NLP processing from the servers that may also be handling queries.
+It also allows more compute resources to be applied to the NLP indexing than is available on the search cluster.
+* Using the `cartesianProduct` function the analyzed tokens can be indexed as individual documents which allows analyzed tokens to be searched and analyzed with Solr's aggregation and graph expressions.
+* Also using `cartesianProduct` the analyzed tokens can be aggregated, analyzed and visualized using streaming expressions directly before indexing occurs.
 
 
-Below is an example of the `analyze` function being applied to the *Resolution.Description*
-field in the tuples. The *\_text_* fields analyzer is used to analyze the text and the
-analyzed tokens are added to the documents in the *token_ss* field.
+Below is an example of the `analyze` function being applied to the *Resolution.Description* field in the tuples.
+The *\_text_* field's analyzer is used to analyze the text and the analyzed tokens are added to the documents in the *token_ss* field.
 
 [source,text]
 ----
@@ -533,9 +519,9 @@ When this expression is sent to the `/stream` handler it responds with:
 }
 ----
 
-The example below shows the `cartesianProduct` function expanding the analyzed terms in the `term_s` field into
-their own documents. Notice that the other fields from the document are maintained with each term. This allows each term
-to be indexed in a separate document so the relationships between terms and the other fields can be explored through
+The example below shows the `cartesianProduct` function expanding the analyzed terms in the `term_s` field into their own documents.
+Notice that the other fields from the document are maintained with each term.
+This allows each term to be indexed in a separate document so the relationships between terms and the other fields can be explored through
 graph expressions or aggregations.
 
 
diff --git a/solr/solr-ref-guide/src/logs.adoc b/solr/solr-ref-guide/src/logs.adoc
index 807cc05..b09d0b1 100644
--- a/solr/solr-ref-guide/src/logs.adoc
+++ b/solr/solr-ref-guide/src/logs.adoc
@@ -24,8 +24,7 @@ See the <<math-start.adoc#,Getting Started>> chapter to learn how to get started
 
 == Loading
 
-The out-of-the-box Solr log format can be loaded into a Solr index using the `bin/postlogs` command line tool
-located in the `bin/` directory of the Solr distribution.
+The out-of-the-box Solr log format can be loaded into a Solr index using the `bin/postlogs` command line tool located in the `bin/` directory of the Solr distribution.
 
 NOTE: If working from the source distribution the
 distribution must first be built before `postlogs` can be run.
@@ -51,32 +50,25 @@ The example above will index all the log files under `/var/logs/solrlogs` to the
 
 Log exploration is often the first step in log analytics and visualization.
 
-When working with unfamiliar installations exploration can be used to understand which collections are
-covered in the logs, what shards and cores are in those collections and the types of operations being
-performed on those collections.
+When working with unfamiliar installations, exploration can be used to understand which collections are covered in the logs, what shards and cores are in those collections, and the types of operations being performed on those collections.
 
-Even with familiar Solr installations exploration is still extremely
-important while troubleshooting because it will often turn up surprises such as unknown errors or
-unexpected admin or indexing operations.
+Even with familiar Solr installations, exploration is still extremely important while troubleshooting because it will often turn up surprises such as unknown errors or unexpected admin or indexing operations.
 
 === Sampling
 
-The first step in exploration is to take a random sample from the `logs` collection
-with the `random` function.
+The first step in exploration is to take a random sample from the `logs` collection with the `random` function.
 
-In the example below the `random` function is run with one
-parameter which is the name of the collection to sample.
+In the example below the `random` function is run with one parameter, which is the name of the collection to sample.
 
 image::images/math-expressions/logs-sample.png[]
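+
+A minimal sketch of such a sampling expression, assuming the `logs` collection created by `bin/postlogs`, is simply:
+
+[source,text]
+----
+random(logs)
+----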
 
-The sample contains 500 random records with the their full field list. By looking
-at this sample we can quickly learn about the *fields* available in the `logs` collection.
+The sample contains 500 random records with their full field list.
+By looking at this sample we can quickly learn about the *fields* available in the `logs` collection.
 
 === Time Period
 
 Each log record contains a time stamp in the `date_dt` field.
-Its often useful to understand what time period the logs cover and how many log records have been
-indexed.
+It's often useful to understand what time period the logs cover and how many log records have been indexed.
 
 The `stats` function can be run to display this information.
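+
+A rough sketch of that `stats` call, with an illustrative catch-all query, looks like:
+
+[source,text]
+----
+stats(logs,
+      q="*:*",
+      min(date_dt),
+      max(date_dt),
+      count(*))
+----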
 
@@ -85,22 +77,18 @@ image::images/math-expressions/logs-dates.png[]
 
 === Record Types
 
-One of the key fields in the index is the `type_s` field which is the type of log
-record.
+One of the key fields in the index is the `type_s` field, which is the type of log record.
 
-The `facet` expression can be used to visualize the different types of log records and how many
-records of each type are in the index.
+The `facet` expression can be used to visualize the different types of log records and how many records of each type are in the index.
 
 image::images/math-expressions/logs-type.png[]
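+
+A sketch of the underlying `facet` expression, with illustrative bucket sizing, looks something like:
+
+[source,text]
+----
+facet(logs,
+      q="*:*",
+      buckets="type_s",
+      bucketSorts="count(*) desc",
+      bucketSizeLimit=100,
+      count(*))
+----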
 
 
 === Collections
 
-Another important field is the `collection_s` field which is the collection that the
-log record was generated from.
+Another important field is the `collection_s` field, which is the collection that the log record was generated from.
 
-The `facet` expression can be used to visualize the different collections and how many log records
-they generate.
+The `facet` expression can be used to visualize the different collections and how many log records they generate.
 
 image::images/math-expressions/logs-collection.png[]
 
@@ -114,11 +102,9 @@ image::images/math-expressions/logs-type-collection.png[]
 
 === Time Series
 
-The `timeseries` function can be used to visualize a time series for a specific time range
-of the logs.
+The `timeseries` function can be used to visualize a time series for a specific time range of the logs.
 
-In the example below a time series is used to visualize the log record counts
-at 15 second intervals.
+In the example below a time series is used to visualize the log record counts at 15 second intervals.
 
 image::images/math-expressions/logs-time-series.png[]
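+
+A sketch of the kind of `timeseries` expression behind such a chart (the time window and query are placeholders):
+
+[source,text]
+----
+timeseries(logs,
+           q="*:*",
+           field="date_dt",
+           start="2021-03-19T21:00:00Z",
+           end="2021-03-19T22:00:00Z",
+           gap="+15SECOND",
+           count(*))
+----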
 
@@ -127,19 +113,16 @@ Then a burst of log activity occurs from minute 27 to minute 52.
 
 This is then followed by a large spike of log activity.
 
-The example below breaks this down further by adding a query on the `type_s` field to only
-visualize *query* activity in the log.
+The example below breaks this down further by adding a query on the `type_s` field to only visualize *query* activity in the log.
 
 
 image::images/math-expressions/logs-time-series2.png[]
 
-Notice the query activity accounts for more then half of the burst of log records between
-21:27 and 21:52. But the query activity does not account for the large spike in
-log activity that follows.
+Notice the query activity accounts for more than half of the burst of log records between 21:27 and 21:52.
+But the query activity does not account for the large spike in log activity that follows.
 
-We can account for that spike by changing the search to include only *update*, *commit*,
-and *deleteByQuery* records in the logs. We can also narrow by collection
-so we know where these activities are taking place.
+We can account for that spike by changing the search to include only *update*, *commit*, and *deleteByQuery* records in the logs.
+We can also narrow by collection so we know where these activities are taking place.
 
 
 image::images/math-expressions/logs-time-series3.png[]
@@ -150,10 +133,9 @@ better understanding of what's contained in the logs.
 
 == Query Counting
 
-Distributed searches produce more than one log record for each query. There will be one *top level* log
-record for
-the top level distributed query and a *shard level* log record on one replica from each shard. There may also
-be a set of *ids* queries to retrieve fields by id from the shards to complete the page of results.
+Distributed searches produce more than one log record for each query.
+There will be one *top level* log record for the top level distributed query and a *shard level* log record on one replica from each shard.
+There may also be a set of *ids* queries to retrieve fields by id from the shards to complete the page of results.
 
 There are fields in the log index that can be used to differentiate between the three types of query records.
 
@@ -170,16 +152,14 @@ image::images/math-expressions/query-top-level.png[]
 
 === Shard Level Queries
 
-To find all the shard level queries that are not IDs queries, adjust the query to limit results to logs with `distrib_s:false AND ids_s:false`
-as follows:
+To find all the shard level queries that are not IDs queries, adjust the query to limit results to logs with `distrib_s:false AND ids_s:false` as follows:
 
 image::images/math-expressions/query-shard-level.png[]
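+
+As a sketch, such a query can be issued with the `search` function (the field list, sort, and row count are illustrative):
+
+[source,text]
+----
+search(logs,
+       q="type_s:query AND distrib_s:false AND ids_s:false",
+       fl="date_dt, core_s, q_s, qtime_i",
+       sort="date_dt desc",
+       rows="100")
+----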
 
 
 === ID Queries
 
-To find all the *ids* queries, adjust the query to limit results to logs with `distrib_s:false AND ids_s:true`
-as follows:
+To find all the *ids* queries, adjust the query to limit results to logs with `distrib_s:false AND ids_s:true` as follows:
 
 image::images/math-expressions/query-ids.png[]
 
@@ -197,8 +177,7 @@ There are number of powerful visualizations and statistical approaches for analy
 
 Scatter plots can be used to visualize random samples of the `qtime_i`
 field.
-The example below demonstrates a scatter plot of 500 random samples
-from the `ptest1` collection of log records.
+The example below demonstrates a scatter plot of 500 random samples from the `ptest1` collection of log records.
 
 In this example, `qtime_i` is plotted on the y-axis and the x-axis is simply a sequence to spread the query times out across the plot.
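+
+A sketch of the sampling expression for a plot like this (the query filter is an assumption):
+
+[source,text]
+----
+random(ptest1,
+       q="type_s:query",
+       fl="qtime_i",
+       rows="500")
+----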
 
@@ -211,8 +190,7 @@ From this scatter plot we can tell a number of important things about the query
 
 * The sample query times range from a low of 122 to a high of 643.
 * The mean appears to be just above 400 ms.
-* The query times tend to cluster closer to the mean and become less frequent as they move away
-from the mean.
+* The query times tend to cluster closer to the mean and become less frequent as they move away from the mean.
 
 
 === Highest QTime Scatter Plot
@@ -234,8 +212,7 @@ From this plot we can see that the 500 highest query times start at 510ms and sl
 
 === QTime Distribution
 
-In this example a visualization is created which shows the
-distribution of query times rounded to the nearest second.
+In this example a visualization is created which shows the distribution of query times rounded to the nearest second.
 
 The example below starts by taking a random sample of 10000 log records with a `type_s` of `query`.
 The results of the `random` function are assigned to the variable `a`.
@@ -250,8 +227,7 @@ The result is set to variable `c`.
 The `round` function then rounds all elements of the query times vector to the nearest second.
 This means all query times less than 500ms will round to 0.
 
-The `freqTable` function is then applied to the vector of query times rounded to
-the nearest second.
+The `freqTable` function is then applied to the vector of query times rounded to the nearest second.
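+
+Putting the steps together, the expression looks roughly like the sketch below; the query filter and the millisecond-to-second conversion via `scalarDivide` are assumptions:
+
+[source,text]
+----
+let(a=random(logs, q="type_s:query", fl="qtime_i", rows="10000"),
+    b=col(a, qtime_i),
+    c=round(scalarDivide(1000, b)),
+    freqTable(c))
+----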
 
 The resulting frequency table is shown in the visualization below.
 The x-axis is the number of seconds.
@@ -274,11 +250,9 @@ Then a random sample of 10000 log records is drawn and set to variable `a`.
 The `col` function is then used to extract the `qtime_i` field from the sample results and this vector is set to variable `b`.
 
 The `percentile` function is then used to calculate the value at each percentile for the vector of query times.
-The array of percentiles set to variable `p` tells the `percentile` function
-which percentiles to calculate.
+The array of percentiles set to variable `p` tells the `percentile` function which percentiles to calculate.
 
-Then the `zplot` function is used to plot the *percentiles* on the x-axis and
-the *query time* at each percentile on the y-axis.
+Then the `zplot` function is used to plot the *percentiles* on the x-axis and the *query time* at each percentile on the y-axis.
 
 image::images/math-expressions/query-qq.png[]
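+
+A condensed sketch of the expression behind this plot, with an illustrative percentile array and query filter:
+
+[source,text]
+----
+let(p=array(1, 5, 10, 25, 50, 75, 90, 95, 99),
+    a=random(logs, q="type_s:query", fl="qtime_i", rows="10000"),
+    b=col(a, qtime_i),
+    qt=percentile(b, p),
+    zplot(x=p, y=qt))
+----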
 
@@ -307,8 +281,7 @@ Therefore one slow node can be responsible for slow overall search time.
 The fields `core_s`, `replica_s` and `shard_s` are available in the log records.
 These fields allow average query time to be calculated by *core*, *replica* or *shard*.
 
-The `core_s` field is particularly useful as its the most granular element and
-the naming convention often includes the collection, shard and replica information.
+The `core_s` field is particularly useful as it's the most granular element and the naming convention often includes the collection, shard, and replica information.
 
 The example below uses the `facet` function to calculate `avg(qtime_i)` by core.
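+
+A sketch of that aggregation, with illustrative bucket sizing:
+
+[source,text]
+----
+facet(logs,
+      q="type_s:query",
+      buckets="core_s",
+      bucketSorts="avg(qtime_i) desc",
+      bucketSizeLimit=100,
+      avg(qtime_i))
+----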
 
@@ -326,14 +299,13 @@ If query analysis shows that most queries are performing well but there are outl
 The `q_s` and `q_t` fields both hold the value of the *q* parameter from Solr requests.
 The `q_s` field is a string field and the `q_t` field has been tokenized.
 
-The `search` function can be used to return the top N slowest queries in the logs by sorting the results by `qtime_i desc`. the example
-below demonstrates this:
+The `search` function can be used to return the top N slowest queries in the logs by sorting the results by `qtime_i desc`.
+The example below demonstrates this:
 
 image::images/math-expressions/slow-queries.png[]
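+
+A sketch of such a search (the field list and row count are illustrative):
+
+[source,text]
+----
+search(logs,
+       q="type_s:query",
+       fl="date_dt, core_s, q_s, qtime_i",
+       sort="qtime_i desc",
+       rows="10")
+----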
 
 Once the queries have been retrieved they can be inspected and tried individually to determine if the query is consistently slow.
-If the query is shown to be slow a plan to improve the query performance
-can be devised.
+If the query is shown to be slow a plan to improve the query performance can be devised.
 
 === Commits
 
@@ -378,7 +350,8 @@ in max `qtime_i`.
 
 == Errors
 
-The log index will contain any error records found in the logs. Error records will have a `type_s` field value of `error`.
+The log index will contain any error records found in the logs.
+Error records will have a `type_s` field value of `error`.
 
 The example below searches for error records:
 
diff --git a/solr/solr-ref-guide/src/luke-request-handler.adoc b/solr/solr-ref-guide/src/luke-request-handler.adoc
index bf799eb..f3a2aa3 100644
--- a/solr/solr-ref-guide/src/luke-request-handler.adoc
+++ b/solr/solr-ref-guide/src/luke-request-handler.adoc
@@ -17,12 +17,17 @@
 // under the License.
 
 The Luke Request Handler offers programmatic access to the information provided on the <<schema-browser-screen#schema-browser-screen,Schema Browser>> page of the Admin UI.
-It is modeled after the Luke, the Lucene Index Browser by Andrzej Bialecki.  It is an implicit handler, so you don't need to define it.
+It is modeled after Luke, the Lucene Index Browser by Andrzej Bialecki.
+It is an implicit handler, so you don't need to define it.
 
 The Luke Request Handler accepts the following parameters:
 
 `show`::
-The data about the index to include in the response.  Options are `schema`, `index`, `doc`, `all`.  `index` describes the high level details about the index.  `schema` returns details about the `schema` plus the `index` data.  `doc` works in conjunction with `docId` or `id` parameters and returns details about a specific document plus the `index` data.
+The data about the index to include in the response.
+Options are `schema`, `index`, `doc`, `all`.
+* `index` describes the high level details about the index.
+* `schema` returns details about the `schema` plus the `index` data.
+* `doc` works in conjunction with `docId` or `id` parameters and returns details about a specific document plus the `index` data.
 
 `id`::
 Get a document using the uniqueKeyField specified in schema.xml.
@@ -31,13 +36,16 @@ Get a document using the uniqueKeyField specified in schema.xml.
 Get a document using a Lucene documentID.
 
 `fl`::
-Limit the returned values to a set of fields. This is useful if you want to increase the `numTerms` and don't want a massive response.
+Limit the returned values to a set of fields.
+This is useful if you want to increase the `numTerms` and don't want a massive response.
 
 `numTerms`::
-How many top terms for each field. The default is 10.
+How many top terms for each field.
+The default is `10`.
 
 `includeIndexFieldFlags`::
-Choose whether /luke should return the index-flags for each field. Fetching and returning the index-flags for each field in the index has non-zero cost, and can slow down requests to /luke.
+Choose whether /luke should return the index-flags for each field.
+Fetching and returning the index-flags for each field in the index has non-zero cost, and can slow down requests to /luke.
 
 
 == LukeRequestHandler Examples
diff --git a/solr/solr-ref-guide/src/machine-learning.adoc b/solr/solr-ref-guide/src/machine-learning.adoc
index 6ddb56c..c7952a6 100644
--- a/solr/solr-ref-guide/src/machine-learning.adoc
+++ b/solr/solr-ref-guide/src/machine-learning.adoc
@@ -33,8 +33,7 @@ There are six distance measure functions that return a function that performs th
 * `cosine`
 * `haversineMeters` (Geospatial distance measure)
 
-The distance measure functions can be used with all machine learning functions
-that support distance measures.
+The distance measure functions can be used with all machine learning functions that support distance measures.
 
 Below is an example for computing Euclidean distance for two numeric arrays:
 
@@ -94,53 +93,43 @@ When this expression is sent to the `/stream` handler it responds with:
 
 === Distance Matrices
 
-Distance matrices are powerful tools for visualizing the distance
-between two or more
-vectors.
+Distance matrices are powerful tools for visualizing the distance between two or more vectors.
 
-The `distance` function builds a distance matrix
-if a matrix is passed as the parameter. The distance matrix is computed for the *columns*
-of the matrix.
+The `distance` function builds a distance matrix if a matrix is passed as the parameter.
+The distance matrix is computed for the *columns* of the matrix.
 
 The example below demonstrates the power of distance matrices combined with 2 dimensional faceting.
 
-In this example the `facet2D` function is used to generate a two dimensional facet aggregation
-over the fields `complaint_type_s` and `zip_s` from the `nyc311` complaints database.
+In this example the `facet2D` function is used to generate a two dimensional facet aggregation over the fields `complaint_type_s` and `zip_s` from the `nyc311` complaints database.
 The *top 20* complaint types and the *top 25* zip codes for each complaint type are aggregated.
 The result is a stream of tuples each containing the fields `complaint_type_s`, `zip_s` and the count for the pair.
 
-The `pivot` function is then used to pivot the fields into a *matrix* with the `zip_s`
-field as the *rows* and the `complaint_type_s` field as the *columns*. The `count(*)` field populates
-the values in the cells of the matrix.
+The `pivot` function is then used to pivot the fields into a *matrix* with the `zip_s` field as the *rows* and the `complaint_type_s` field as the *columns*.
+The `count(*)` field populates the values in the cells of the matrix.
 
-The `distance` function is then used to compute the distance matrix for the columns
-of the matrix using `cosine` distance. This produces a distance matrix
-that shows distance between complaint types based on the zip codes they appear in.
+The `distance` function is then used to compute the distance matrix for the columns of the matrix using `cosine` distance.
+This produces a distance matrix that shows distance between complaint types based on the zip codes they appear in.
 
-Finally the `zplot` function is used to plot the distance matrix as a heat map. Notice that the
-heat map has been configured so that the intensity of color increases as the distance between vectors
-decreases.
+Finally the `zplot` function is used to plot the distance matrix as a heat map.
+Notice that the heat map has been configured so that the intensity of color increases as the distance between vectors decreases.
 
 
 image::images/math-expressions/distance.png[]
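+
+A condensed sketch of the pipeline described above; the parameter values are illustrative and the exact quoting of the `pivot` arguments may differ:
+
+[source,text]
+----
+let(a=facet2D(nyc311,
+              q="*:*",
+              x="complaint_type_s",
+              y="zip_s",
+              dimensions="20,25",
+              count(*)),
+    b=pivot(a, "zip_s", "complaint_type_s", "count(*)"),
+    c=distance(b, cosine()),
+    zplot(heat=c))
+----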
 
-The heat map is interactive, so mousing over one of the cells pops up the values
-for the cell.
+The heat map is interactive, so mousing over one of the cells pops up the values for the cell.
 
 image::images/math-expressions/distanceview.png[]
 
-Notice that HEAT/HOT WATER and UNSANITARY CONDITION complaints have a cosine distance of .1 (rounded to the nearest
-tenth).
+Notice that HEAT/HOT WATER and UNSANITARY CONDITION complaints have a cosine distance of .1 (rounded to the nearest tenth).
 
 
 == K-Nearest Neighbor (KNN)
 
 The `knn` function searches the rows of a matrix with a search vector and
-returns a matrix of the k-nearest neighbors. This allows for secondary vector
-searches over result sets.
+returns a matrix of the k-nearest neighbors.
+This allows for secondary vector searches over result sets.
 
-The `knn` function supports changing of the distance measure by providing one of the following
-distance measure functions:
+The `knn` function supports changing of the distance measure by providing one of the following distance measure functions:
 
 * `euclidean` (Default)
 * `manhattan`
@@ -150,18 +139,15 @@ distance measure functions:
 * `haversineMeters` (Geospatial distance measure)
 
 The example below shows how to perform a secondary search over an aggregation
-result set. The goal of the example is to find zip codes in the nyc311 complaint
-database that have similar complaint types to the zip code 10280.
+result set.
+The goal of the example is to find zip codes in the nyc311 complaint database that have similar complaint types to the zip code 10280.
 
-The first step in the example is to use the `facet2D` function to perform a two
-dimensional aggregation over the `zip_s` and `complaint_type_s` fields. In the example
-the top 119 zip codes and top 5 complaint types for each zip code are calculated
-for the borough of Manhattan. The result is a list of tuples each containing
-the `zip_s`, `complaint_type_s` and the `count(*)` for the combination.
+The first step in the example is to use the `facet2D` function to perform a two-dimensional aggregation over the `zip_s` and `complaint_type_s` fields.
+In the example the top 119 zip codes and top 5 complaint types for each zip code are calculated for the borough of Manhattan.
+The result is a list of tuples each containing the `zip_s`, `complaint_type_s` and the `count(*)` for the combination.
 
 The list of tuples is then *pivoted* into a matrix with the `pivot` function.
-The `pivot` function in this example returns a matrix with rows of zip codes
-and columns of complaint types.
+The `pivot` function in this example returns a matrix with rows of zip codes and columns of complaint types.
 The `count(*)` field from the tuples populates the cells of the matrix.
 This matrix will be used as the secondary search matrix.
 
@@ -174,126 +160,102 @@ The `rowAt` function is then used to return the vector at that *index* from the
 This vector is the *search vector*.
 
 Now that we have a matrix and search vector we can use the `knn` function to perform the search.
-In the example the `knn` function searches the matrix with the search vector with a K of 5, using
-*cosine* distance. Cosine distance is useful for comparing sparse vectors which is the case in this
-example. The `knn` function returns a matrix with the top 5 nearest neighbors to the search vector.
+In the example the `knn` function searches the matrix with the search vector with a K of 5, using *cosine* distance.
+Cosine distance is useful for comparing sparse vectors, which is the case in this example.
+The `knn` function returns a matrix with the top 5 nearest neighbors to the search vector.
 
 The `knn` function populates the row and column labels of the return matrix and
 also adds a vector of *distances* for each row as an attribute to the matrix.
 
-In the example the `zplot` function extracts the row labels and
-the distance vector with the `getRowLabels` and `getAttribute` functions.
-The `topFeatures` function is used to extract
-the top 5 column labels for each zip code vector, based on the counts for each
-column. Then `zplot` outputs the data in a format that can be visualized in
+In the example the `zplot` function extracts the row labels and the distance vector with the `getRowLabels` and `getAttribute` functions.
+The `topFeatures` function is used to extract the top 5 column labels for each zip code vector, based on the counts for each column.
+Then `zplot` outputs the data in a format that can be visualized in
 a table with Zeppelin-Solr.
 
 image::images/math-expressions/knn.png[]
 
-The table above shows each zip code returned by the `knn` function along
-with the list of complaints and distances. These are the zip codes that are most similar
-to the 10280 zip code based on their top 5 complaint types.
+The table above shows each zip code returned by the `knn` function along with the list of complaints and distances.
+These are the zip codes that are most similar to the 10280 zip code based on their top 5 complaint types.
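+
+With the matrix and search vector in hand, the search step itself reduces to a single call, roughly as follows (variable names are illustrative):
+
+[source,text]
+----
+r=knn(zipMatrix, searchVector, 5, cosine())
+----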
 
 == K-Nearest Neighbor Regression
 
 K-nearest neighbor regression is a non-linear, bivariate and multivariate regression method.
-KNN regression is a lazy learning
-technique which means it does not fit a model to the training set in advance. Instead the
-entire training set of observations and outcomes are held in memory and predictions are made
-by averaging the outcomes of the k-nearest neighbors.
+KNN regression is a lazy learning technique which means it does not fit a model to the training set in advance.
+Instead the entire training set of observations and outcomes are held in memory and predictions are made by averaging the outcomes of the k-nearest neighbors.
 
 The `knnRegress` function is used to perform nearest neighbor regression.
 
-
 === 2D Non-Linear Regression
 
 The example below shows the *regression plot* for KNN regression applied to a 2D scatter plot.
 
-In this example the `random` function is used to draw 500 random samples from the `logs` collection
-containing two fields `filesize_d` and `eresponse_d`. The sample is then vectorized with the
-`filesize_d` field stored in a vector assigned to variable *x* and the `eresponse_d` vector stored in
-variable `y`. The `knnRegress` function is then applied with `20` as the nearest neighbor parameter,
-which returns a KNN function which can be used to predict values.
+In this example the `random` function is used to draw 500 random samples from the `logs` collection containing two fields `filesize_d` and `eresponse_d`.
+The sample is then vectorized, with the `filesize_d` field stored in a vector assigned to variable `x` and the `eresponse_d` field stored in a vector assigned to variable `y`.
+The `knnRegress` function is then applied with `20` as the nearest neighbor parameter, which returns a KNN function which can be used to predict values.
 The `predict` function is then called on the KNN function to predict values for the original `x` vector.
 Finally `zplot` is used to plot the original `x` and `y` vectors along with the predictions.
 
 image::images/math-expressions/knnRegress.png[]
 
-Notice that the regression plot shows a non-linear relations ship between the `filesize_d`
-field and the `eresponse_d` field. Also note that KNN regression
-plots a non-linear curve through the scatter plot. The larger the size
-of K (nearest neighbors), the smoother the line.
+Notice that the regression plot shows a non-linear relationship between the `filesize_d` field and the `eresponse_d` field.
+Also note that KNN regression plots a non-linear curve through the scatter plot.
+The larger the size of K (nearest neighbors), the smoother the line.
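+
+A condensed sketch of the pipeline behind this plot; the query and the `zplot` parameter names are assumptions:
+
+[source,text]
+----
+let(a=random(logs, q="*:*", fl="filesize_d, eresponse_d", rows="500"),
+    x=col(a, filesize_d),
+    y=col(a, eresponse_d),
+    r=knnRegress(x, y, 20),
+    p=predict(r, x),
+    zplot(x=x, y=y, pred=p))
+----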
 
 === Multivariate Non-Linear Regression
 
-The `knnRegress` function is also a powerful and flexible tool for
-multi-variate non-linear regression.
+The `knnRegress` function is also a powerful and flexible tool for multi-variate non-linear regression.
 
-In the example below a multi-variate regression is performed using
-a database designed for analyzing and predicting wine quality. The
-database contains nearly 1600 records with 9 predictors of wine quality:
-pH, alcohol, fixed_acidity, sulphates, density, free_sulfur_dioxide,
-volatile_acidity, citric_acid, residual_sugar. There is also a field
-called quality assigned to each wine ranging
-from 3 to 8.
+In the example below a multi-variate regression is performed using a database designed for analyzing and predicting wine quality.
+The database contains nearly 1600 records with 9 predictors of wine quality:
+pH, alcohol, fixed_acidity, sulphates, density, free_sulfur_dioxide, volatile_acidity, citric_acid, residual_sugar.
+There is also a field called quality assigned to each wine ranging from 3 to 8.
 
-KNN regression can be used to predict wine quality for vectors containing
-the predictor values.
+KNN regression can be used to predict wine quality for vectors containing the predictor values.
 
-In the example a search is performed on the `redwine` collection to
-return all the rows in the database of observations. Then the quality field and
-predictor fields are read into vectors and set to variables.
+In the example a search is performed on the `redwine` collection to return all the rows in the database of observations.
+Then the quality field and predictor fields are read into vectors and set to variables.
 
-The predictor variables are added as rows to a matrix which is
-transposed so each row in the matrix contains one observation with the 9
-predictor values.
+The predictor variables are added as rows to a matrix which is transposed so each row in the matrix contains one observation with the 9 predictor values.
 This is our observation matrix which is assigned to the variable `obs`.
 
 Then the `knnRegress` function regresses the observations with quality outcomes.
-The value for K is set to 5 in the example, so the average quality of the 5
-nearest neighbors will be used to calculate the quality.
+The value for K is set to 5 in the example, so the average quality of the 5 nearest neighbors will be used to calculate the quality.
 
-The `predict` function is then used to generate a vector of predictions
-for the entire observation set. These predictions will be used to determine
-how well the KNN regression performed over the observation data.
+The `predict` function is then used to generate a vector of predictions for the entire observation set.
+These predictions will be used to determine how well the KNN regression performed over the observation data.
 
-The error, or *residuals*, for the regression are then calculated by
-subtracting the *predicted* quality from the *observed* quality.
-The `ebeSubtract` function is used to perform the element-by-element
-subtraction between the two vectors.
+The error, or *residuals*, for the regression are then calculated by subtracting the *predicted* quality from the *observed* quality.
+The `ebeSubtract` function is used to perform the element-by-element subtraction between the two vectors.
 
-Finally the `zplot` function formats the predictions and errors for
-for the visualization of the *residual plot*.
+Finally the `zplot` function formats the predictions and errors for the visualization of the *residual plot*.
 
 image::images/math-expressions/redwine1.png[]
 
-The residual plot plots the *predicted* values on the x-axis and the *error* for the
-prediction on the y-axis. The scatter plot shows how the errors
-are distributed across the full range of predictions.
+The residual plot plots the *predicted* values on the x-axis and the *error* for the prediction on the y-axis.
+The scatter plot shows how the errors are distributed across the full range of predictions.
 
-The residual plot can be interpreted to understand how the KNN regression performed on the
-training data.
+The residual plot can be interpreted to understand how the KNN regression performed on the training data.
 
-* The plot shows the prediction error appears to be fairly evenly distributed
-above and below zero. The density of the errors increases as it approaches zero. The
-bubble size reflects the density of errors at the specific point in the plot.
+* The plot shows the prediction error appears to be fairly evenly distributed above and below zero.
+The density of the errors increases as it approaches zero.
+The bubble size reflects the density of errors at the specific point in the plot.
 This provides an intuitive feel for the distribution of the model's error.
 
 * The plot also visualizes the variance of the error across the range of
-predictions. This provides an intuitive understanding of whether the KNN predictions
-will have similar error variance across the full range predictions.
+predictions.
+This provides an intuitive understanding of whether the KNN predictions will have similar error variance across the full range of predictions.
 
-The residuals can also be visualized using a histogram to better understand
-the shape of the residuals distribution. The example below shows the same KNN
-regression as above with a plot of the distribution of the errors.
+The residuals can also be visualized using a histogram to better understand the shape of the residuals distribution.
+The example below shows the same KNN regression as above with a plot of the distribution of the errors.
 
-In the example the `zplot` function is used to plot the `empiricalDistribution`
-function of the residuals, with an 11 bin histogram.
+In the example the `zplot` function is used to plot the `empiricalDistribution` function of the residuals, with an 11 bin histogram.
 
 image::images/math-expressions/redwine2.png[]
 
-Notice that the errors follow a bell curve centered close to 0. From this plot
-we can see the probability of getting prediction errors between -1 and 1 is quite high.
+Notice that the errors follow a bell curve centered close to 0.
+From this plot we can see the probability of getting prediction errors between -1 and 1 is quite high.
 
 *Additional KNN Regression Parameters*
 
@@ -319,9 +281,8 @@ Sample syntax:
 r=knnRegress(obs, quality, 5, robust="true"),
 ----
 
-. The `scale` named parameter can be used to scale the columns of the observations and search vectors
-at prediction time. This can improve the performance of the KNN regression when the feature columns
-are at different scales causing the distance calculations to be place too much weight on the larger columns.
+. The `scale` named parameter can be used to scale the columns of the observations and search vectors at prediction time.
+This can improve the performance of the KNN regression when the feature columns are at different scales, causing the distance calculations to place too much weight on the larger columns.
 +
 Sample syntax:
 +
@@ -332,22 +293,20 @@ r=knnRegress(obs, quality, 5, scale="true"),
 
 == knnSearch
 
-The `knnSearch` function returns the k-nearest neighbors
-for a document based on text similarity.
+The `knnSearch` function returns the k-nearest neighbors for a document based on text similarity.
 Under the covers the `knnSearch` function uses Solr's <<other-parsers.adoc#more-like-this-query-parser,More Like This>> query parser plugin.
 This capability uses the search engine's query, term statistics, scoring, and ranking capability to perform a fast, nearest neighbor search for similar documents over large distributed indexes.
 
 The results of this search can be used directly or provide *candidates* for machine learning operations such as a secondary KNN vector search.
 
-The example below shows the `knnSearch` function on a movie reviews data set. The search returns the 50 documents most similar to a specific document ID (`83e9b5b0...`) based on the similarity of the `review_t` field.
+The example below shows the `knnSearch` function on a movie reviews data set.
+The search returns the 50 documents most similar to a specific document ID (`83e9b5b0...`) based on the similarity of the `review_t` field.
 The `mindf` and `maxdf` specify the minimum and maximum document frequency of the terms used to perform the search.
 These parameters can make the query faster by eliminating high frequency terms and also improve accuracy by removing noise terms from the search.
 
 image::images/math-expressions/knnSearch.png[]
 
-NOTE: In this example the `select`
-function is used to truncate the review in the output to 220 characters to make it easier
-to read in a table.
+NOTE: In this example the `select` function is used to truncate the review in the output to 220 characters to make it easier to read in a table.
 
 == DBSCAN
 
@@ -363,25 +322,25 @@ DBSCAN uses two parameters to filter result sets to clusters of specific density
 
 The `zplot` function has direct support for plotting 2D clusters by using the `clusters` named parameter.
 
-The example below uses DBSCAN clustering and cluster visualization to find
-the *hot spots* on a map for rat sightings in the NYC 311 complaints database.
+The example below uses DBSCAN clustering and cluster visualization to find the *hot spots* on a map for rat sightings in the NYC 311 complaints database.
 
-In this example the `random` function draws a sample of records from the `nyc311` collection where
-the complaint description matches "rat sighting" and latitude is populated in the record.
+In this example the `random` function draws a sample of records from the `nyc311` collection where the complaint description matches "rat sighting" and latitude is populated in the record.
 The latitude and longitude fields are then vectorized and added as rows to a matrix.
-The matrix is transposed so each row contains a single latitude, longitude
-point.
+The matrix is transposed so each row contains a single latitude, longitude point.
 The `dbscan` function is then used to cluster the latitude and longitude points.
 Notice that the `dbscan` function in the example has four parameters.
 
 * `obs` : The observation matrix of lat/lon points
 
-* `eps` : The distance between points to be considered a cluster. 100 meters in the example.
+* `eps` : The distance between points to be considered a cluster.
+100 meters in the example.
 
-* `min points`: The minimum points in a cluster for the cluster to be returned by the function. `5` in the example.
+* `min points`: The minimum points in a cluster for the cluster to be returned by the function.
+`5` in the example.
 
 * `distance measure`: An optional distance measure used to determine the
-distance between points. The default is Euclidean distance.
+distance between points.
+The default is Euclidean distance.
 The example uses `haversineMeters` which returns the distance in meters which is much more meaningful for geospatial use cases.
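+
+With those parameters, the clustering call itself reduces to roughly the following, assuming `obs` holds the transposed lat/lon matrix:
+
+[source,text]
+----
+clusters=dbscan(obs, 100, 5, haversineMeters())
+----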
 
 Finally, the `zplot` function is used to visualize the clusters on a map with Zeppelin-Solr.
@@ -390,8 +349,8 @@ The map below has been zoomed to a specific area of Brooklyn with a high density
 image::images/math-expressions/dbscan1.png[]
 
 Notice in the visualization that only 1019 points were returned from the 5000 samples.
-This is the power of the DBSCAN algorithm to filter records that don't match the criteria
-of a cluster. The points that are plotted all belong to clearly defined clusters.
+This is the power of the DBSCAN algorithm to filter records that don't match the criteria of a cluster.
+The points that are plotted all belong to clearly defined clusters.
 
 The map visualization can be zoomed further to explore the locations of specific clusters.
 The example below shows a zoom into an area of dense clusters.
@@ -402,74 +361,62 @@ image::images/math-expressions/dbscan2.png[]
 == K-Means Clustering
 
 The `kmeans` function performs k-means clustering of the rows of a matrix.
-Once the clustering has been completed there are a number of useful functions available
-for examining and visualizing the clusters and centroids.
+Once the clustering has been completed there are a number of useful functions available for examining and visualizing the clusters and centroids.
 
 
 === Clustered Scatter Plot
 
-In this example we'll again be clustering 2D lat/lon points of rat sightings. But unlike the DBSCAN example, k-means clustering
-does not on its own
-perform any noise reduction. So in order to reduce the noise a smaller random sample is selected from the data than was used
-for the DBSCAN example.
+In this example we'll again be clustering 2D lat/lon points of rat sightings.
+But unlike the DBSCAN example, k-means clustering does not on its own perform any noise reduction.
+So in order to reduce the noise, a smaller random sample is selected from the data than was used for the DBSCAN example.
 
 We'll see that sampling itself is a powerful noise reduction tool which helps visualize the cluster density.
-This is because there is a higher probability that samples will be drawn from higher density clusters and a lower
-probability that samples will be drawn from lower density clusters.
+This is because there is a higher probability that samples will be drawn from higher density clusters and a lower probability that samples will be drawn from lower density clusters.
 
-In this example the `random` function draws a sample of 1500 records from the `nyc311` (complaints database) collection where
-the complaint description matches "rat sighting" and latitude is populated in the record. The latitude and longitude fields
-are then vectorized and added as rows to a matrix. The matrix is transposed so each row contains a single latitude, longitude
-point. The `kmeans` function is then used to cluster the latitude and longitude points into 21 clusters.
+In this example the `random` function draws a sample of 1500 records from the `nyc311` (complaints database) collection where the complaint description matches "rat sighting" and latitude is populated in the record.
+The latitude and longitude fields are then vectorized and added as rows to a matrix.
+The matrix is transposed so each row contains a single latitude, longitude point.
+The `kmeans` function is then used to cluster the latitude and longitude points into 21 clusters.
 Finally, the `zplot` function is used to visualize the clusters as a scatter plot.
 
 image::images/math-expressions/2DCluster1.png[]
 
-The scatter plot above shows each lat/lon point plotted on a Euclidean plain with longitude on the
-x-axis and
-latitude on the y-axis. The plot is dense enough so the outlines of the different boroughs are visible
-if you know the boroughs of New York City.
-
+The scatter plot above shows each lat/lon point plotted on a Euclidean plane, with longitude on the x-axis and latitude on the y-axis.
+The plot is dense enough so the outlines of the different boroughs are visible if you know the boroughs of New York City.
 
-Each cluster is shown in a different color. This plot provides interesting
-insight into the densities of rat sightings throughout the five boroughs of New York City. For
-example it highlights a cluster of dense sightings in Brooklyn at cluster1
+Each cluster is shown in a different color.
+This plot provides interesting insight into the densities of rat sightings throughout the five boroughs of New York City.
+For example, it highlights a cluster of dense sightings in Brooklyn at cluster1
 surrounded by less dense but still high activity clusters.
 
 === Plotting the Centroids
 
-The centroids of each cluster can then be plotted on a map to visualize the center of the
-clusters. In the example below the centroids are extracted from the clusters using the `getCentroids`
-function, which returns a matrix of the centroids.
-
-The centroids matrix contains 2D lat/lon points. The `colAt` function can then be used
-to extract the latitude and longitude columns by index from the matrix so they can be
-plotted with `zplot`. A map visualization is used below to display the centroids.
+The centroids of each cluster can then be plotted on a map to visualize the center of the clusters.
+In the example below the centroids are extracted from the clusters using the `getCentroids` function, which returns a matrix of the centroids.
 
+The centroids matrix contains 2D lat/lon points.
+The `colAt` function can then be used to extract the latitude and longitude columns by index from the matrix so they can be plotted with `zplot`.
+A map visualization is used below to display the centroids.
 
 image::images/math-expressions/centroidplot.png[]
 
 
-The map can then be zoomed to get a closer look at the centroids in the high density areas shown
-in the cluster scatter plot.
+The map can then be zoomed to get a closer look at the centroids in the high density areas shown in the cluster scatter plot.
 
 image::images/math-expressions/centroidzoom.png[]
 
 
 === Phrase Extraction
 
-K-means clustering produces centroids or *prototype* vectors which can be used to represent
-each cluster. In this example the key features of the centroids are extracted
-to represent the key phrases for clusters of TF-IDF term vectors.
+K-means clustering produces centroids or *prototype* vectors which can be used to represent each cluster.
+In this example the key features of the centroids are extracted to represent the key phrases for clusters of TF-IDF term vectors.
 
 NOTE: The example below works with TF-IDF _term vectors_.
-The section <<term-vectors.adoc#,Text Analysis and Term Vectors>> offers
-a full explanation of this features.
+The section <<term-vectors.adoc#,Text Analysis and Term Vectors>> offers a full explanation of these features.
 
 In the example the `search` function returns documents where the `review_t` field matches the phrase "star wars".
-The `select` function is run over the result set and applies the `analyze` function
-which uses the Lucene/Solr analyzer attached to the schema field `text_bigrams` to re-analyze the `review_t`
-field. This analyzer returns bigrams which are then annotated to documents in a field called `terms`.
+The `select` function is run over the result set and applies the `analyze` function which uses the Lucene/Solr analyzer attached to the schema field `text_bigrams` to re-analyze the `review_t` field.
+This analyzer returns bigrams which are then annotated to documents in a field called `terms`.
 
 The `termVectors` function then creates TF-IDF term vectors from the bigrams stored in the `terms` field.
 The `kmeans` function is then used to cluster the bigram term vectors into 5 clusters.
@@ -544,15 +491,12 @@ When this expression is sent to the `/stream` handler it responds with:
 
 == Multi K-Means Clustering
 
-K-means clustering will produce different outcomes depending on
-the initial placement of the centroids. K-means is fast enough
-that multiple trials can be performed so that the best outcome can be selected.
+K-means clustering will produce different outcomes depending on the initial placement of the centroids.
+K-means is fast enough that multiple trials can be performed so that the best outcome can be selected.
 
-The `multiKmeans` function runs the k-means clustering algorithm for a given number of trials and selects the
-best result based on which trial produces the lowest intra-cluster variance.
+The `multiKmeans` function runs the k-means clustering algorithm for a given number of trials and selects the best result based on which trial produces the lowest intra-cluster variance.
 
-The example below is identical to the phrase extraction example except that it uses `multiKmeans` with 15 trials,
-rather than a single trial of the `kmeans` function.
+The example below is identical to the phrase extraction example except that it uses `multiKmeans` with 15 trials, rather than a single trial of the `kmeans` function.
 
 [source,text]
 ----
@@ -621,27 +565,23 @@ This expression returns the following response:
 
 == Fuzzy K-Means Clustering
 
-The `fuzzyKmeans` function is a soft clustering algorithm which
-allows vectors to be assigned to more then one cluster. The `fuzziness` parameter
-is a value between `1` and `2` that determines how fuzzy to make the cluster assignment.
+The `fuzzyKmeans` function is a soft clustering algorithm that allows vectors to be assigned to more than one cluster.
+The `fuzziness` parameter is a value between `1` and `2` that determines how fuzzy to make the cluster assignment.
 
-After the clustering has been performed the `getMembershipMatrix` function can be called
-on the clustering result to return a matrix describing the probabilities
-of cluster membership for each vector.
+After the clustering has been performed the `getMembershipMatrix` function can be called on the clustering result to return a matrix describing the probabilities of cluster membership for each vector.
 This matrix can be used to understand relationships between clusters.
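+
+As a sketch, assuming `vectors` is a matrix of term vectors and using an illustrative fuzziness value:
+
+[source,text]
+----
+let(a=fuzzyKmeans(vectors, 5, fuzziness="1.25"),
+    m=getMembershipMatrix(a),
+    d=distance(m),
+    zplot(heat=d))
+----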
 
 In the example below `fuzzyKmeans` is used to cluster the movie reviews matching the phrase "star wars".
-But instead of looking at the clusters or centroids, the `getMembershipMatrix` is used to return the
-membership probabilities for each document. The membership matrix is comprised of a row for each
-vector that was clustered. There is a column in the matrix for each cluster.
+But instead of looking at the clusters or centroids, the `getMembershipMatrix` is used to return the membership probabilities for each document.
+The membership matrix is comprised of a row for each vector that was clustered.
+There is a column in the matrix for each cluster.
 The values in the matrix contain the probability that a specific vector belongs to a specific cluster.
 
-In the example the `distance` function is then used to create a *distance matrix* from the columns of the
-membership matrix. The distance matrix is then visualized with the `zplot` function as a heat map.
+In the example the `distance` function is then used to create a *distance matrix* from the columns of the membership matrix.
+The distance matrix is then visualized with the `zplot` function as a heat map.
 
 In the example `cluster1` and `cluster5` have the shortest distance between the clusters.
-Further analysis of the features in both clusters can be performed to understand
-the relationship between `cluster1` and `cluster5`.
+Further analysis of the features in both clusters can be performed to understand the relationship between `cluster1` and `cluster5`.
 
 image::images/math-expressions/fuzzyk.png[]
 
@@ -649,8 +589,7 @@ NOTE: The heat map has been configured to increase in color intensity as the dis
 
 == Feature Scaling
 
-Before performing machine learning operations its often necessary to
-scale the feature vectors so they can be compared at the same scale.
+Before performing machine learning operations, it's often necessary to scale the feature vectors so they can be compared at the same scale.
 
 All the scaling functions below operate on vectors and matrices.
 When operating on a matrix the rows of the matrix are scaled.
@@ -660,12 +599,10 @@ When operating on a matrix the rows of the matrix are scaled.
 The `minMaxScale` function scales a vector or matrix between a minimum and maximum value.
 By default it will scale between `0` and `1` if min/max values are not provided.
 
-Below is a plot of a sine wave, with an amplitude of 1, before and
-after it has been scaled between -5 and 5.
+Below is a plot of a sine wave, with an amplitude of 1, before and after it has been scaled between -5 and 5.
 
 image::images/math-expressions/minmaxscale.png[]
 
-
 Below is a simple example of min/max scaling of a matrix between 0 and 1.
 Notice that once brought into the same scale the vectors are the same.
 
@@ -711,11 +648,9 @@ When this expression is sent to the `/stream` handler it responds with:
 
 === Standardization
 
-The `standardize` function scales a vector so that it has a
-mean of 0 and a standard deviation of 1.
+The `standardize` function scales a vector so that it has a mean of 0 and a standard deviation of 1.
 
-Below is a plot of a sine wave, with an amplitude of 1, before and
-after it has been standardized.
+Below is a plot of a sine wave, with an amplitude of 1, before and after it has been standardized.
 
 image::images/math-expressions/standardize.png[]
 
@@ -764,12 +699,12 @@ When this expression is sent to the `/stream` handler it responds with:
 
 === Unit Vectors
 
-The `unitize` function scales vectors to a magnitude of 1. A vector with a
-magnitude of 1 is known as a unit vector. Unit vectors are preferred
-when the vector math deals with vector direction rather than magnitude.
+The `unitize` function scales vectors to a magnitude of 1.
+A vector with a magnitude of 1 is known as a unit vector.
+Unit vectors are preferred when the vector math deals with vector direction rather than magnitude.
 
-Below is a plot of a sine wave, with an amplitude of 1, before and
-after it has been unitized.
+Below is a plot of a sine wave, with an amplitude of 1, before and after it has been unitized.
 
 image::images/math-expressions/unitize.png[]
 
diff --git a/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc b/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc
index 53647f2..e343e5e 100644
--- a/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc
+++ b/solr/solr-ref-guide/src/major-changes-in-solr-8.adoc
@@ -30,7 +30,8 @@ A thorough review of the list in <<Major Changes in Earlier 7.x Versions>>, belo
 
 === Upgrade Prerequisites
 
-*If using SolrCloud, you must be on Solr 7.3.0 or higher*. Solr's LeaderInRecovery (LIR) functionality <<Solr 7.3,changed significantly>> in Solr 7.3. While these changes were back-compatible for all subsequent 7.x releases, that compatibility has been removed in 8.0.
+*If using SolrCloud, you must be on Solr 7.3.0 or higher*.
+Solr's LeaderInRecovery (LIR) functionality <<Solr 7.3,changed significantly>> in Solr 7.3.
+While these changes were back-compatible for all subsequent 7.x releases, that compatibility has been removed in 8.0.
 In order to upgrade to Solr 8.x, all nodes of your cluster must be running Solr 7.3 or higher. If an upgrade is attempted with nodes running versions earlier than 7.3, documents could be lost.
 
 If you are not using Solr in SolrCloud mode (you run a user-managed cluster or a single-node installation), we expect you can upgrade to Solr 8 from any 7.x version without major issues.
diff --git a/solr/solr-ref-guide/src/managed-resources.adoc b/solr/solr-ref-guide/src/managed-resources.adoc
index f35f1608..17ee531 100644
--- a/solr/solr-ref-guide/src/managed-resources.adoc
+++ b/solr/solr-ref-guide/src/managed-resources.adoc
@@ -55,7 +55,8 @@ To begin, you need to define a field type that uses the <<filters.adoc#managed-s
 
 There are two important things to notice about this field type definition:
 
-<1> The filter implementation class is `solr.ManagedStopFilterFactory`. This is a special implementation of the <<filters.adoc#stop-filter,StopFilterFactory>> that uses a set of stop words that are managed from a REST API.
+<1> The filter implementation class is `solr.ManagedStopFilterFactory`.
+This is a special implementation of the <<filters.adoc#stop-filter,StopFilterFactory>> that uses a set of stop words that are managed from a REST API.
 
 <2> The `managed=”english”` attribute gives a name to the set of managed stop words, in this case indicating the stop words are for English text.
 
@@ -105,8 +106,10 @@ Assuming you sent this request to Solr, the response body is a JSON document:
 
 The `sample_techproducts_configs` <<config-sets.adoc#,configset>> ships with a pre-built set of managed stop words, however you should only interact with this file using the API and not edit it directly.
 
-One thing that should stand out to you in this response is that it contains a `managedList` of words as well as `initArgs`. This is an important concept in this framework -- managed resources typically have configuration and data.
-For stop words, the only configuration parameter is a boolean that determines whether to ignore the case of tokens during stop word filtering (ignoreCase=true|false). The data is a list of words, which is represented as a JSON array named `managedList` in the response.
+One thing that should stand out to you in this response is that it contains a `managedList` of words as well as `initArgs`.
+This is an important concept in this framework -- managed resources typically have configuration and data.
+For stop words, the only configuration parameter is a boolean that determines whether to ignore the case of tokens during stop word filtering (ignoreCase=true|false).
+The data is a list of words, which is represented as a JSON array named `managedList` in the response.
 
 Now, let’s add a new word to the English stop word list using an HTTP PUT:
 
@@ -212,7 +215,8 @@ curl -X PUT -H 'Content-type:application/json' --data-binary '["funny", "enterta
 ----
 
 Note that the expansion is performed when processing the PUT request so the underlying persistent state is still a managed map.
-Consequently, if after sending the previous PUT request, you did a GET for `/schema/analysis/synonyms/english/jocular`, then you would receive a list containing `["funny", "entertaining", "whimiscal"]`. Once you've created synonym mappings using a list, each term must be managed separately.
+Consequently, if after sending the previous PUT request, you did a GET for `/schema/analysis/synonyms/english/jocular`, then you would receive a list containing `["funny", "entertaining", "whimiscal"]`.
+Once you've created synonym mappings using a list, each term must be managed separately.
 
 Lastly, you can delete a mapping by sending a DELETE request to the managed endpoint.
 
diff --git a/solr/solr-ref-guide/src/metrics-reporting.adoc b/solr/solr-ref-guide/src/metrics-reporting.adoc
index a8c6dc6..33789e9 100644
--- a/solr/solr-ref-guide/src/metrics-reporting.adoc
+++ b/solr/solr-ref-guide/src/metrics-reporting.adoc
@@ -34,7 +34,8 @@ Some of these meters may be missing or empty for any number of valid reasons.
 In these cases, missing values of any type will be returned as `null` by default so empty values won't impact averages or histograms.
 This is configurable for several types of missing values; see the <<The <metrics> <missingValues> Element>> section below.
 
-Each group of related metrics with unique names is managed in a *metric registry*. Solr maintains several such registries, each corresponding to a high-level group such as: `jvm`, `jetty`, `node`, and `core` (see <<Metric Registries>> below).
+Each group of related metrics with unique names is managed in a *metric registry*.
+Solr maintains several such registries, each corresponding to a high-level group such as: `jvm`, `jetty`, `node`, and `core` (see <<Metric Registries>> below).
 
 For each group (and/or for each registry) there can be several *reporters*, which are components responsible for communication of metrics from selected registries to external systems.
 Currently implemented reporters support emitting metrics via JMX, Ganglia, Graphite and SLF4J.
@@ -171,7 +172,8 @@ The default is `1028`.
 * `alpha`, the decay parameter.
 The default is `0.015`.
 This is only valid for the `ExponentiallyDecayingReservoir`.
-* `window`, the window size, in seconds, and only valid for the `SlidingTimeWindowReservoir`. The default is 300 (5 minutes).
+* `window`, the window size, in seconds, and only valid for the `SlidingTimeWindowReservoir`.
+The default is `300` (5 minutes).
 
 <timer>:: This element defines an implementation of a `Timer` supplier.
 The default implementation supports the `clock` and `reservoir` parameters described above.
@@ -195,7 +197,8 @@ As an example of a section of `solr.xml` that defines some of these custom param
 
 === The <metrics> <missingValues> Element
 Long-lived metrics values are still reported when the underlying value is unavailable (e.g., "INDEX.sizeInBytes" when
-IndexReader is closed). Short-lived transient metrics (such as cache entries) that are properties of complex gauges
+IndexReader is closed).
+Short-lived transient metrics (such as cache entries) that are properties of complex gauges
 (internally represented as `MetricsMap`) are simply skipped when not available, and neither their names nor values
 appear in registries (or in `/admin/metrics` reports).
 
@@ -300,7 +303,8 @@ Default is no filtering, i.e., all metrics from the selected registry will be re
 
 Reporters are instantiated for every group and registry that they were configured for, at the time when the respective components are initialized (e.g., on JVM startup or SolrCore load).
 
-When reporters are created their configuration is validated (and e.g., necessary connections are established). Uncaught errors at this initialization stage cause the reporter to be discarded from the running configuration.
+When reporters are created their configuration is validated (and e.g., necessary connections are established).
+Uncaught errors at this initialization stage cause the reporter to be discarded from the running configuration.
 
 Reporters are closed when the corresponding component is being closed (e.g., on SolrCore close, or JVM shutdown) but metrics that they reported are still maintained in respective registries, as explained in the previous section.
 
@@ -329,7 +333,8 @@ Note either `serviceUrl` or `agentId` can be specified but not both - if both ar
 Object names created by this reporter are hierarchical, dot-separated but also properly structured to form corresponding hierarchies in e.g., JConsole.
 This hierarchy consists of the following elements in the top-down order:
 
-* registry name (e.g., `solr.core.collection1.shard1.replica1`). Dot-separated registry names are also split into ObjectName hierarchy levels, so that metrics for this registry will be shown under `/solr/core/collection1/shard1/replica1` in JConsole, with each domain part being assigned to `dom1, dom2, ... domN` property.
+* registry name (e.g., `solr.core.collection1.shard1.replica1`).
+Dot-separated registry names are also split into ObjectName hierarchy levels, so that metrics for this registry will be shown under `/solr/core/collection1/shard1/replica1` in JConsole, with each domain part being assigned to `dom1, dom2, ... domN` property.
 * reporter name (the value of reporter's `name` attribute)
 * category, scope and name for request handlers
 * or additional `name1, name2, ... nameN` elements for metrics from other components.
@@ -538,11 +543,13 @@ A few query parameters are available to limit your request to only certain metri
 
 `group`:: The metric group to retrieve.
 The default is `all` to retrieve all metrics for all groups.
-Other possible values are: `jvm`, `jetty`, `node`, and `core`. More than one group can be specified in a request; multiple group names should be separated by a comma.
+Other possible values are: `jvm`, `jetty`, `node`, and `core`.
+More than one group can be specified in a request; multiple group names should be separated by a comma.
 
 `type`:: The type of metric to retrieve.
 The default is `all` to retrieve all metric types.
-Other possible values are `counter`, `gauge`, `histogram`, `meter`, and `timer`. More than one type can be specified in a request; multiple types should be separated by a comma.
+Other possible values are `counter`, `gauge`, `histogram`, `meter`, and `timer`.
+More than one type can be specified in a request; multiple types should be separated by a comma.
 
 `prefix`:: The first characters of metric name that will filter the metrics returned to those starting with the provided string.
 It can be combined with `group` and/or `type` parameters.
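 Combining these parameters, a request might look like the following (a sketch, assuming a local node on the default port):

 [source,bash]
 ----
 # Only core-level counters and timers whose names start with "QUERY."
 curl "http://localhost:8983/solr/admin/metrics?group=core&type=counter,timer&prefix=QUERY."
 ----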
diff --git a/solr/solr-ref-guide/src/numerical-analysis.adoc b/solr/solr-ref-guide/src/numerical-analysis.adoc
index de43acd..6c644b3 100644
--- a/solr/solr-ref-guide/src/numerical-analysis.adoc
+++ b/solr/solr-ref-guide/src/numerical-analysis.adoc
@@ -50,7 +50,9 @@ The visualization above first creates two arrays with x and y-axis points.
 Notice that the x-axis ranges from 0 to 9.
 Then the `akima`, `spline` and `lerp` functions are applied to the vectors to create three interpolation functions.
 
-Then 500 hundred random samples are drawn from a uniform distribution between 0 and 3. These are the new zoomed in x-axis points, between 0 and 3. Notice that we are sampling a specific area of the curve.
+Then 500 random samples are drawn from a uniform distribution between 0 and 3.
+These are the new zoomed-in x-axis points, between 0 and 3.
+Notice that we are sampling a specific area of the curve.
 
 Then the `predict` function is used to predict y-axis points for the sampled x-axis, for all three interpolation functions.
 Finally all three prediction vectors are plotted with the sampled x-axis points.
@@ -101,7 +103,8 @@ The `derivative` function is then applied to the linear interpolation.
 
 image::images/math-expressions/derivative.png[]
 
-Notice that the *miles_traveled* line has a slope of 10 until the 5th hour where it changes to a slope of 50. The *mph* line, which is the derivative, visualizes the *velocity* of the *miles_traveled* line.
+Notice that the *miles_traveled* line has a slope of 10 until the 5th hour where it changes to a slope of 50.
+The *mph* line, which is the derivative, visualizes the *velocity* of the *miles_traveled* line.
 
 Also notice that the derivative is calculated along straight lines showing immediate change from one point to the next.
 This is because linear interpolation (`lerp`) is used as the interpolation function.
@@ -110,7 +113,8 @@ If the `spline` or `akima` functions had been used it would have produced a deri
 
 === The Second Derivative (Acceleration)
 
-While the first derivative represents velocity, the second derivative represents `acceleration`. The second the derivative is the derivative of the first derivative.
+While the first derivative represents velocity, the second derivative represents `acceleration`.
+The second derivative is the derivative of the first derivative.
 
 The example below builds on the first example and adds the second derivative.
 Notice that the second derivative `d2` is taken by applying the derivative function to a linear interpolation of the first derivative.
@@ -226,12 +230,14 @@ let(years=array(1998, 2000, 2002, 2004, 2006),
 ----
 
 In this example a bicubic spline is used to interpolate a matrix of real estate data.
-Each row of the matrix represent specific `years`. Each column of the matrix represents `floors` of the building.
+Each row of the matrix represents specific `years`.
+Each column of the matrix represents `floors` of the building.
 The grid of numbers is the average selling price of an apartment for each year and floor.
 For example in 2002 the average selling price for the 9th floor was `415000` (row 3, column 3).
 
 The `bicubicSpline` function is then used to interpolate the grid, and the `predict` function is used to predict a value for year 2003, floor 8.
-Notice that the matrix does not include a data point for year 2003, floor 8. The `bicubicSpline` function creates that data point based on the surrounding data in the matrix:
+Notice that the matrix does not include a data point for year 2003, floor 8.
+The `bicubicSpline` function creates that data point based on the surrounding data in the matrix:
 
 [source,json]
 ----
diff --git a/solr/solr-ref-guide/src/package-manager-internals.adoc b/solr/solr-ref-guide/src/package-manager-internals.adoc
index 19baa3d..9c2db22 100644
--- a/solr/solr-ref-guide/src/package-manager-internals.adoc
+++ b/solr/solr-ref-guide/src/package-manager-internals.adoc
@@ -16,7 +16,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The package manager (CLI) internally uses various Solr APIs to install, deploy and update packages. This document contains an overview of those APIs.
+The package manager (CLI) internally uses various Solr APIs to install, deploy and update packages.
+This document contains an overview of those APIs.
 
 == Salient Features
 
@@ -30,12 +31,16 @@ The package manager (CLI) internally uses various Solr APIs to install, deploy a
 
 == Classloaders
 
-At the heart of the system, we have classloader isolation. To achieve this, the system is simplified into two layered classloaders:
-The root classloader which has all the jars from Solr classpath. This requires Solr node restart to change anything.
-A set of named classloaders that inherit from the root classloader. The life cycles of the named classloaders are tied to the package configuration in ZooKeeper. As soon as the configuration is modified, the corresponding classloaders are reloaded and components are asked to reload.
+At the heart of the system, we have classloader isolation.
+To achieve this, the system is simplified into two layers of classloaders.
+The root classloader has all the jars from the Solr classpath; changing it requires a Solr node restart.
+A set of named classloaders inherits from the root classloader.
+The life cycles of the named classloaders are tied to the package configuration in ZooKeeper.
+As soon as the configuration is modified, the corresponding classloaders are reloaded and components are asked to reload.
 
 == Package Loading Security
-Packages are disabled by default. Start all your nodes with the system property `-Denable.packages=true` to use this feature.
+Packages are disabled by default.
+Start all your nodes with the system property `-Denable.packages=true` to use this feature.
 
 *Example*
 [source,bash]
@@ -62,16 +67,19 @@ Package store is a distributed file store which can store arbitrary files in the
 
 * This is a fully replicated file system based repository.
 * It lives at <solr.home>/filestore on each Solr node.
-* Every entry  is a file + metadata. The metadata is named .<filename>.json.
+* Every entry is a file + metadata.
+The metadata is named `.<filename>.json` (see the layout sketch after this list).
 * The metadata file contains the sha256 and signatures of the file.
 * Users can’t create files starting with period (.).
-* It is agnostic of content type of files. You may store jars as well as other files..
+* It is agnostic of the content type of files.
+You may store jars as well as other files.
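 On disk, the layout might look something like this (a hypothetical sketch; the node's filestore path and the package file `/mypkg/1.0/myplugins.jar` are assumptions):

 [source,bash]
 ----
 # Hypothetical filestore layout for a package file /mypkg/1.0/myplugins.jar
 $ find /var/solr/filestore/mypkg -type f
 /var/solr/filestore/mypkg/1.0/myplugins.jar
 /var/solr/filestore/mypkg/1.0/.myplugins.jar.json
 ----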
 
 === How Does the Package Store Work?
 When a file is uploaded to the PackageStore, the following is true:
 
 * It’s saved to the local file system.
-* It’s saved along with the metadata. The metadata file stores the sha512 and signatures of the jar files too.
+* It’s saved along with the metadata.
+The metadata file stores the sha512 and signatures of the jar files too.
 * Every live node in the cluster is asked to download it as well.
 
 === Package Store APIs
@@ -133,11 +141,15 @@ A Package have the following attributes:
 ** `version`: The version string
 ** `files`: An array of files from the package store
 
-For every package/version in the packages definition, there is a unique `SolrResourceLoader` instance. This is a child of the `CoreContainer` resource loader.
+For every package/version in the packages definition, there is a unique `SolrResourceLoader` instance.
+This is a child of the `CoreContainer` resource loader.
 
 === packages.json
 
-The package configurations live in a file called `packages.json` in ZooKeeper. At any given moment we can have multiple versions of a given package in the package configuration. The system will always use the latest version. Versions are sorted by their numeric value and the highest is the latest.
+The package configurations live in a file called `packages.json` in ZooKeeper.
+At any given moment we can have multiple versions of a given package in the package configuration.
+The system will always use the latest version.
+Versions are sorted by their numeric value and the highest is the latest.
 
 For example:
 
@@ -174,7 +186,8 @@ Use the `delete` command to delete the highest version and choose the next highe
 
 === Using Multiple Versions in Parallel
 
-We use `params.json` in the collection config to store a version of a package it uses. By default it is the `$LATEST`.
+We use `params.json` in the collection config to store the version of a package it uses.
+By default it is `$LATEST`.
 
 [source, json]
 ----
@@ -186,7 +199,9 @@ We use `params.json` in the collection config to store a version of a package it
 ----
 
 <1> For `mypkg`, use the version `0.1` irrespective of whether there is a newer version available or not.
-<2> For `pkg2`, use the latest. This is optional. The default is `$LATEST`.
+<2> For `pkg2`, use the latest.
+This is optional.
+The default is `$LATEST`.
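 These values can be set with the config params API (a sketch; the collection name `collection1` and the `PKG_VERSIONS` property name are assumptions here):

 [source,bash]
 ----
 # Pin mypkg to 0.1 for this collection; pkg2 follows the latest version
 curl -X POST -H 'Content-type:application/json' \
   -d '{"set": {"PKG_VERSIONS": {"mypkg": "0.1", "pkg2": "$LATEST"}}}' \
   http://localhost:8983/solr/collection1/config/params
 ----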
 
 === Workflow
 
@@ -197,7 +212,8 @@ We use `params.json` in the collection config to store a version of a package it
 
 === Using Packages in Plugins
 
-Any class name can be prefixed with the package name, e.g., `mypkg:fully.qualified.ClassName` and Solr would use the latest version of the package to load the classes from. The plugins loaded from packages cannot depend on core level classes.
+Any class name can be prefixed with the package name, e.g., `mypkg:fully.qualified.ClassName`, and Solr will use the latest version of the package to load the classes from.
+Plugins loaded from packages cannot depend on core-level classes.
 
 .Plugin declaration in `solrconfig.xml`
 [source, xml]
@@ -235,7 +251,10 @@ curl http://localhost:8983/api/cluster/package?omitHeader=true
           "files":["/mypkg/1.0/myplugins.jar"]}]}}}
 ----
 
-. The package should be ready to use at this point. Next, register a plugin in your collection from the package. Note the `mypkg:` prefix applied to the `class` attribute. The same result can be achieved by editing your `solrconfig.xml` as well:
+. The package should be ready to use at this point.
+Next, register a plugin in your collection from the package.
+Note the `mypkg:` prefix applied to the `class` attribute.
+The same result can be achieved by editing your `solrconfig.xml` as well:
 +
 [source,bash]
 ----
@@ -283,7 +302,8 @@ $ curl http://localhost:8983/solr/gettingstarted/test?omitHeader=true
   "loader":"java.net.FactoryURLClassLoader"}
 ----
 
-. Update the version of our component. Get a new version of the jar, sign and upload it:
+. Update the version of our component.
+Get a new version of the jar, sign and upload it:
 +
 [source, bash]
 ----
@@ -373,7 +393,8 @@ Note that the `Version` value is `"2"`, which means the plugin is updated.
 
 === How to Avoid Automatic Upgrade
 
-The default version used in any collection is always the latest. However, setting a per-collection property in `params.json` ensures that the versions are always fixed irrespective of the new versions added.
+The default version used in any collection is always the latest.
+However, setting a per-collection property in `params.json` ensures that the versions are always fixed irrespective of the new versions added.
 
 [source,bash]
 ----
diff --git a/solr/solr-ref-guide/src/package-manager.adoc b/solr/solr-ref-guide/src/package-manager.adoc
index 4d2f0bf..4a29068 100644
--- a/solr/solr-ref-guide/src/package-manager.adoc
+++ b/solr/solr-ref-guide/src/package-manager.adoc
@@ -40,7 +40,8 @@ The package manager (CLI) allows you to:
 
 === Enable the Package Manager
 
-The package manager is disabled by default. To enable it, start all Solr nodes with the `-Denable.packages=true` parameter.
+The package manager is disabled by default.
+To enable it, start all Solr nodes with the `-Denable.packages=true` parameter.
 
 [source,bash]
 ----
@@ -48,11 +49,13 @@ $ bin/solr -c -Denable.packages=true
 ----
 
 WARNING: There are security consequences to enabling the package manager.
-If an unauthorized user gained access to the system, they would have write access to ZooKeeper and could install packages from untrusted sources. Always ensure you have secured Solr with firewalls and <<authentication-and-authorization-plugins.adoc#,authentication>> before enabling the package manager.
+If an unauthorized user gained access to the system, they would have write access to ZooKeeper and could install packages from untrusted sources.
+Always ensure you have secured Solr with firewalls and <<authentication-and-authorization-plugins.adoc#,authentication>> before enabling the package manager.
 
 === Add Trusted Repositories
 
-A _repository_ is a location hosting one or many packages. Often, this is a web service that serves meta-information about packages, the package artifacts for downloading, and a public key to validate the jar file signatures while installing.
+A _repository_ is a location hosting one or many packages.
+Often, this is a web service that serves meta-information about packages, the package artifacts for downloading, and a public key to validate the jar file signatures while installing.
 
 In order to install packages into Solr, one has to add a repository hosting the packages.
 
@@ -61,7 +64,8 @@ In order to install packages into Solr, one has to add a repository hosting the
 $ bin/solr package add-repo <name-of-repo> <repo-url>
 ----
 
-NOTE: Do not add repositories that you don't trust or control. Only add repositories that are based on HTTPS and avoid repositories based on HTTP to safeguard against MITM attacks.
+NOTE: Do not add repositories that you don't trust or control.
+Only add repositories that are based on HTTPS and avoid repositories based on HTTP to safeguard against MITM attacks.
 
 === Listing and Installing Packages
 
@@ -124,7 +128,8 @@ For example, if a package named `mypackage` contains a request handler, we would
 
 Then use either the Collections API <<collection-management.adoc#reload,RELOAD command>> or the <<collections-core-admin.adoc#,Admin UI>> to reload the collection.
 
-Next, set the package version that this collection is using. If the collection is named `collection1`, the package name is `mypackage`, and the installed version is `1.0.0`, the command would look like this:
+Next, set the package version that this collection is using.
+If the collection is named `collection1`, the package name is `mypackage`, and the installed version is `1.0.0`, the command would look like this:
 
 [source,bash]
 ----
@@ -153,7 +158,8 @@ Next, install the new version of the package from the repositories.
 $ bin/solr package install <package-name>:<version>
 ----
 
-Once you have installed the new version, you can selectively update each of your collections or the cluster level plugins. Assuming the old version is `1.0.0` of the package `mypackage`, and the new version is `2.0.0`, the command would be as follows:
+Once you have installed the new version, you can selectively update each of your collections or the cluster level plugins.
+Assuming the old version is `1.0.0` of the package `mypackage`, and the new version is `2.0.0`, the command would be as follows:
 
 [source,bash]
 ----
@@ -194,6 +200,10 @@ $ bin/solr package deploy <package-name> -cluster
 
 == Security
 
-The `add-repo` step should only be executed using HTTPS enabled repository urls only so as to prevent against MITM attacks when Solr is fetching the public key for the repository. This `add-repo` step registers the public key of the trusted repository, and hence can only be executed using the package manager (CLI) having direct write access to the trusted store of the package store (a special location in the package store that cannot be written to using the package store APIs). Also, it  [...]
+The `add-repo` step should only be executed using HTTPS-enabled repository URLs, so as to prevent MITM attacks when Solr is fetching the public key for the repository.
+This `add-repo` step registers the public key of the trusted repository, and hence can only be executed using the package manager (CLI), which has direct write access to the trusted store of the package store (a special location in the package store that cannot be written to using the package store APIs).
+Also, it is critical to protect ZooKeeper from unauthorized write access.
 
-Also, keep in mind, that it is possible to install *any* package from a repository once it has been added. If you want to use some packages in production, a best practice is to setup your own repository and add that to Solr instead of adding a generic third-party repository that is beyond your administrative control. You might want to re-sign packages from a third-party repository using your own private keys and host them at your own repository.
+Also, keep in mind that it is possible to install *any* package from a repository once it has been added.
+If you want to use some packages in production, a best practice is to set up your own repository and add that to Solr instead of adding a generic third-party repository that is beyond your administrative control.
+You might want to re-sign packages from a third-party repository using your own private keys and host them at your own repository.
diff --git a/solr/solr-ref-guide/src/pagination-of-results.adoc b/solr/solr-ref-guide/src/pagination-of-results.adoc
index ca7db7b..5d35ddc 100644
--- a/solr/solr-ref-guide/src/pagination-of-results.adoc
+++ b/solr/solr-ref-guide/src/pagination-of-results.adoc
@@ -79,7 +79,8 @@ Pagination using `start` and `rows` not only require Solr to compute (and sort)
 While a request for `start=0&rows=1000000` may be obviously inefficient because it requires Solr to maintain & sort in memory a set of 1 million documents, likewise a request for `start=999000&rows=1000` is equally inefficient for the same reasons.
 Solr can't compute which matching document is the 999001st result in sorted order, without first determining what the first 999000 matching sorted results are.
 
-If the index is distributed, which is common when running in SolrCloud mode, then 1 million documents are retrieved from *each shard*. For a ten shard index, ten million entries must be retrieved and sorted to figure out the 1000 documents that match those query parameters.
+If the index is distributed, which is common when running in SolrCloud mode, then 1 million documents are retrieved from *each shard*.
+For a ten shard index, ten million entries must be retrieved and sorted to figure out the 1000 documents that match those query parameters.
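 For illustration, a deep-paging request of this form (assuming a collection named `techproducts`) pushes that cost onto every shard:

 [source,bash]
 ----
 # Every shard must collect and sort its top 1,000,000 matches to answer this request
 curl "http://localhost:8983/solr/techproducts/select?q=*:*&sort=id+asc&start=999000&rows=1000"
 ----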
 
 == Fetching A Large Number of Sorted Results: Cursors
 
diff --git a/solr/solr-ref-guide/src/partial-document-updates.adoc b/solr/solr-ref-guide/src/partial-document-updates.adoc
index ae71bbd..1b3b0b9 100644
--- a/solr/solr-ref-guide/src/partial-document-updates.adoc
+++ b/solr/solr-ref-guide/src/partial-document-updates.adoc
@@ -63,7 +63,8 @@ Must be specified as a single numeric value.
 
 === Field Storage
 
-The core functionality of atomically updating a document requires that all fields in your schema must be configured as stored (`stored="true"`) or docValues (`docValues="true"`) except for fields which are `<copyField/>` destinations, which must be configured as `stored="false"`. Atomic updates are applied to the document represented by the existing stored field values.
+The core functionality of atomically updating a document requires that all fields in your schema must be configured as stored (`stored="true"`) or docValues (`docValues="true"`) except for fields which are `<copyField/>` destinations, which must be configured as `stored="false"`.
+Atomic updates are applied to the document represented by the existing stored field values.
 All data in copyField destination fields must originate from ONLY copyField sources.
 
 If `<copyField/>` destinations are configured as stored, then Solr will attempt to index both the current value of the field as well as an additional copy from any source fields.
@@ -234,7 +235,8 @@ curl -X POST 'http://localhost:8983/solr/gettingstarted/update?commit=true' -H '
 In-place updates are very similar to atomic updates; in some sense, this is a subset of atomic updates.
 In regular atomic updates, the entire document is reindexed internally during the application of the update.
 However, in this approach, only the fields to be updated are affected and the rest of the documents are not reindexed internally.
-Hence, the efficiency of updating in-place is unaffected by the size of the documents that are updated (i.e., number of fields, size of fields, etc.). Apart from these internal differences in efficiency, there is no functional difference between atomic updates and in-place updates.
+Hence, the efficiency of updating in-place is unaffected by the size of the documents that are updated (i.e., number of fields, size of fields, etc.).
+Apart from these internal differences in efficiency, there is no functional difference between atomic updates and in-place updates.
 
 An atomic update operation is performed using this In-Place approach only when the fields to be updated meet these three conditions:
 
@@ -246,7 +248,8 @@ To use in-place updates, add a modifier to the field that needs to be updated.
 The content can be updated or incrementally increased.
 
 `set`::
-Set or replace the field value(s) with the specified value(s). May be specified as a single value.
+Set or replace the field value(s) with the specified value(s).
+May be specified as a single value.
 
 `inc`::
 Increments a numeric value by a specific amount.
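 As an illustration, an in-place update using these modifiers might look like the following sketch (`popularity` is a hypothetical numeric docValues field that is neither indexed nor stored):

 [source,bash]
 ----
 # "popularity" is a hypothetical docValues-only numeric field; "inc" bumps it by 5 in place
 curl -X POST 'http://localhost:8983/solr/gettingstarted/update?commit=true' \
   -H 'Content-type:application/json' \
   -d '[{"id": "mydoc", "popularity": {"inc": 5}}]'
 ----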
@@ -501,7 +504,8 @@ Users can not re-purpose that field and specify it as the `versionField` for use
 `DocBasedVersionConstraintsProcessorFactory` supports the following additional configuration parameters, which are all optional:
 
 `ignoreOldUpdates`::
-A boolean option which defaults to `false`. If set to `true`, the update will be silently ignored (and return a status 200 to the client) instead of rejecting updates where the `versionField` is too low.
+A boolean option which defaults to `false`.
+If set to `true`, the update will be silently ignored (and return a status 200 to the client) instead of rejecting updates where the `versionField` is too low.
 
 `deleteVersionParam`::
 A String parameter that can be specified to indicate that this processor should also inspect Delete By Id commands.
diff --git a/solr/solr-ref-guide/src/performance-statistics-reference.adoc b/solr/solr-ref-guide/src/performance-statistics-reference.adoc
index 883319e..79c3578 100644
--- a/solr/solr-ref-guide/src/performance-statistics-reference.adoc
+++ b/solr/solr-ref-guide/src/performance-statistics-reference.adoc
@@ -18,15 +18,19 @@
 
 This page explains some of the statistics that Solr exposes.
 
-There are two approaches to retrieving metrics. First, you can use the <<metrics-reporting.adoc#metrics-api,Metrics API>>, or you can enable JMX and get metrics from the <<mbean-request-handler.adoc#,MBean Request Handler>> or via an external tool such as JConsole. The below descriptions focus on retrieving the metrics using the Metrics API, but the metric names are the same if using the MBean Request Handler or an external tool.
+There are two approaches to retrieving metrics.
+First, you can use the <<metrics-reporting.adoc#metrics-api,Metrics API>>, or you can enable JMX and get metrics from the <<mbean-request-handler.adoc#,MBean Request Handler>> or via an external tool such as JConsole.
+The descriptions below focus on retrieving the metrics using the Metrics API, but the metric names are the same if using the MBean Request Handler or an external tool.
 
-These statistics are per core. When you are running in SolrCloud mode these statistics would co-relate to the performance of an individual replica.
+These statistics are per core.
+When you are running in SolrCloud mode, these statistics correlate to the performance of an individual replica.
 
 == Request Handler Statistics
 
 === Update Request Handler
 
-The update request handler is an endpoint to send data to Solr. We can see how many update requests are being fired, how fast is it performing, and other valuable information regarding requests.
+The update request handler is an endpoint to send data to Solr.
+We can see how many update requests are being fired, how fast they are performing, and other valuable information regarding requests.
 
 *Registry & Path:* `solr.<core>:UPDATE./update`
 
@@ -34,7 +38,9 @@ You can request update request handler statistics with an API request such as `\
 
 === Search Request Handler
 
-Can be useful to measure and track number of search queries, response times, etc. If you are not using the “select” handler then the path needs to be changed appropriately. Similarly if you are using the “sql” handler or “export” handler, the realtime handler “get”, or any other handler similar statistics can be found for that as well.
+This can be useful to measure and track the number of search queries, response times, etc.
+If you are not using the `select` handler, the path needs to be changed appropriately.
+Similarly, if you are using the `sql` handler, the `export` handler, the realtime `get` handler, or any other handler, similar statistics can be found for it as well.
 
 *Registry & Path*: `solr.<core>:QUERY./select`
 
@@ -66,7 +72,9 @@ To get request times, specifically, you can send an API request such as:
 
 *Errors and Other Times*
 
-Other types of data such as errors and timeouts are also provided. These are available under different metric names. For example:
+Other types of data such as errors and timeouts are also provided.
+These are available under different metric names.
+For example:
 
 * `\http://localhost:8983/solr/admin/metrics?group=core&prefix=UPDATE./update.errors`
 *  `\http://localhost:8983/solr/admin/metrics?group=core&prefix=QUERY./select.errors`
@@ -94,17 +102,13 @@ The table below shows the metric names and attributes to request:
 
 *Distributed vs. Local Request Times*
 
-Processing of a single distributed request in SolrCloud usually requires making several requests to
-other nodes and other replicas. The common statistics listed above lump these timings together, even though
-they are very different in nature, thus making it difficult to measure the latency of distributed and
-local requests separately. Solr 8.4 introduced additional statistics that help to do this.
+Processing of a single distributed request in SolrCloud usually requires making several requests to other nodes and other replicas.
+The common statistics listed above lump these timings together, even though they are very different in nature, thus making it difficult to measure the latency of distributed and local requests separately.
+Solr 8.4 introduced additional statistics that help to do this.
 
-These metrics are structured the same as `requestTimes` and `totalTime` metrics above but they use
-different full names, e.g., `QUERY./select.distrib.requestTimes` and `QUERY./select.local.requestTimes`.
-The metrics under the `distrib` path correspond to the time it takes for a (potentially) distributed
-request to complete all remote calls plus any local processing, and return the result to the caller.
-The metrics under the `local` path correspond to the time it takes for a local call (non-distributed,
-i.e., being processed only by the Solr core where the handler operates) to complete.
+These metrics are structured the same as `requestTimes` and `totalTime` metrics above but they use different full names, e.g., `QUERY./select.distrib.requestTimes` and `QUERY./select.local.requestTimes`.
+The metrics under the `distrib` path correspond to the time it takes for a (potentially) distributed request to complete all remote calls plus any local processing, and return the result to the caller.
+The metrics under the `local` path correspond to the time it takes for a local call (non-distributed, i.e., being processed only by the Solr core where the handler operates) to complete.
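 For example, the distributed request times for the `/select` handler can be pulled on their own with a prefix filter (a sketch, assuming a local node on the default port):

 [source,bash]
 ----
 # Only metrics whose names start with the distrib requestTimes prefix
 curl "http://localhost:8983/solr/admin/metrics?group=core&prefix=QUERY./select.distrib.requestTimes"
 ----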
 
 == Update Handler
 
@@ -161,7 +165,8 @@ In addition to a count of rollbacks, mean, 1 minute, 5 minute, and 15 minute rat
 
 === Document Cache
 
-This cache holds Lucene Document objects (the stored fields for each document). Since Lucene internal document IDs are transient, this cache cannot be auto-warmed.
+This cache holds Lucene Document objects (the stored fields for each document).
+Since Lucene internal document IDs are transient, this cache cannot be auto-warmed.
 
 *Registry and Path:* `solr.<core>:CACHE.searcher.documentCache`
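 A sketch of pulling these statistics through the Metrics API (assuming a local node on the default port):

 [source,bash]
 ----
 # Hit ratio, size, evictions, etc., for the document cache of every core
 curl "http://localhost:8983/solr/admin/metrics?group=core&prefix=CACHE.searcher.documentCache"
 ----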
 
diff --git a/solr/solr-ref-guide/src/ping.adoc b/solr/solr-ref-guide/src/ping.adoc
index 38cff84..dd450d2 100644
--- a/solr/solr-ref-guide/src/ping.adoc
+++ b/solr/solr-ref-guide/src/ping.adoc
@@ -23,7 +23,8 @@ image::images/ping/ping.png[image,width=171,height=195]
 
 The search executed by a Ping is configured with the <<request-parameters-api.adoc#,Request Parameters API>>. See <<implicit-requesthandlers.adoc#,Implicit Request Handlers>> for the paramset to use for the `/admin/ping` endpoint.
 
-The Ping option doesn't open a page, but the status of the request can be seen on the core overview page shown when clicking on a collection name. The length of time the request has taken is displayed next to the Ping option, in milliseconds.
+The Ping option doesn't open a page, but the status of the request can be seen on the core overview page shown when clicking on a collection name.
+The length of time the request has taken is displayed next to the Ping option, in milliseconds.
 
 == Ping API Examples
 
@@ -67,7 +68,8 @@ This command will ping all replicas of the given collection name for a response:
 </response>
 ----
 
-Both API calls have the same output. A status=OK indicates that the nodes are responding.
+Both API calls have the same output.
+A `status=OK` indicates that the nodes are responding.
 
 *SolrJ Example with SolrPing*
 
diff --git a/solr/solr-ref-guide/src/plugins-stats-screen.adoc b/solr/solr-ref-guide/src/plugins-stats-screen.adoc
index 76c8b06..84ecf83 100644
--- a/solr/solr-ref-guide/src/plugins-stats-screen.adoc
+++ b/solr/solr-ref-guide/src/plugins-stats-screen.adoc
@@ -16,11 +16,15 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The Plugins screen shows information and statistics about the status and performance of various plugins running in each Solr core. You can find information about the performance of the Solr caches, the state of Solr's searchers, and the configuration of Request Handlers and Search Components.
+The Plugins screen shows information and statistics about the status and performance of various plugins running in each Solr core.
+You can find information about the performance of the Solr caches, the state of Solr's searchers, and the configuration of Request Handlers and Search Components.
 
-Choose an area of interest on the right, and then drill down into more specifics by clicking on one of the names that appear in the central part of the window. In this example, we've chosen to look at the Searcher stats, from the Core area:
+Choose an area of interest on the right, and then drill down into more specifics by clicking on one of the names that appear in the central part of the window.
+In this example, we've chosen to look at the Searcher stats, from the Core area:
 
 .Searcher Statistics
 image::images/plugins-stats-screen/plugin-searcher.png[image,width=462,height=250]
 
-The display is a snapshot taken when the page is loaded. You can get updated status by choosing to either *Watch Changes* or *Refresh Values*. Watching the changes will highlight those areas that have changed, while refreshing the values will reload the page with updated information.
+The display is a snapshot taken when the page is loaded.
+You can get updated status by choosing to either *Watch Changes* or *Refresh Values*.
+Watching the changes will highlight those areas that have changed, while refreshing the values will reload the page with updated information.
diff --git a/solr/solr-ref-guide/src/post-tool.adoc b/solr/solr-ref-guide/src/post-tool.adoc
index 0cda48dd..d02facb 100644
--- a/solr/solr-ref-guide/src/post-tool.adoc
+++ b/solr/solr-ref-guide/src/post-tool.adoc
@@ -18,7 +18,8 @@
 
 Solr includes a simple command line tool for POSTing various types of content to a Solr server.
 
-The tool is `bin/post`. The bin/post tool is a Unix shell script; for Windows (non-Cygwin) usage, see the section <<Post Tool Windows Support>> below.
+The tool is `bin/post`.
+The `bin/post` tool is a Unix shell script; for Windows (non-Cygwin) usage, see the section <<Post Tool Windows Support>> below.
 
 NOTE: This tool is meant for use by new users exploring Solr's capabilities, and is not intended as a robust solution to be used for indexing documents into production systems.
 
@@ -29,7 +30,9 @@ To run it, open a window and enter:
 bin/post -c gettingstarted example/films/films.json
 ----
 
-This will contact the server at `localhost:8983`. Specifying the `collection/core name` is *mandatory*. The `-help` (or simply `-h`) option will output information on its usage (i.e., `bin/post -help)`.
+This will contact the server at `localhost:8983`.
+Specifying the `collection/core name` is *mandatory*.
+The `-help` (or simply `-h`) option will output information on its usage (i.e., `bin/post -help`).
 
 == Using the bin/post Tool
 
@@ -75,7 +78,8 @@ OPTIONS
 
 == Examples Using bin/post
 
-There are several ways to use `bin/post`. This section presents several examples.
+There are several ways to use `bin/post`.
+This section presents several examples.
 
 === Indexing XML
 
@@ -116,7 +120,8 @@ Index a tab-separated file into `gettingstarted`:
 bin/post -c signals -params "separator=%09" -type text/csv data.tsv
 ----
 
-The content type (`-type`) parameter is required to treat the file as the proper type, otherwise it will be ignored and a WARNING logged as it does not know what type of content a .tsv file is. The <<indexing-with-update-handlers.adoc#csv-formatted-index-updates,CSV handler>> supports the `separator` parameter, and is passed through using the `-params` setting.
+The content type (`-type`) parameter is required to treat the file as the proper type; otherwise it will be ignored and a WARNING logged, as the tool does not know what type of content a `.tsv` file is.
+The <<indexing-with-update-handlers.adoc#csv-formatted-index-updates,CSV handler>> supports the `separator` parameter, and is passed through using the `-params` setting.
 
 === Indexing JSON
 
diff --git a/solr/solr-ref-guide/src/probability-distributions.adoc b/solr/solr-ref-guide/src/probability-distributions.adoc
index 85e4247..b82bbd5 100644
--- a/solr/solr-ref-guide/src/probability-distributions.adoc
+++ b/solr/solr-ref-guide/src/probability-distributions.adoc
@@ -16,30 +16,25 @@
 // specific language governing permissions and limitations
 // under the License.
... 7929 lines suppressed ...