You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@solr.apache.org by ct...@apache.org on 2021/11/29 20:52:41 UTC

[solr] branch jira/solr-15556-antora updated (4973e8b -> efc87d0)

This is an automated email from the ASF dual-hosted git repository.

ctargett pushed a change to branch jira/solr-15556-antora
in repository https://gitbox.apache.org/repos/asf/solr.git.


    from 4973e8b  Bump Antora version to 3.0.0-beta.3
     new 420b96b  Fix refs in indexing guide + cleanups + move 'pure nav' pages aside
     new 3c360f0  Fix all refs in the query guide + move 'pure nav' pages aside
     new f9faa1d  Add temporary hard-coded version values to fix build-time warnings
     new efc87d0  Fix refs in upgrade-notes + cleanups for earlier changes

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 solr/solr-ref-guide/antora.yml                     |  18 ++-
 solr/solr-ref-guide/modules/ROOT/pages/index.adoc  |   8 -
 .../pages/configuration-files.adoc                 |   2 +-
 .../modules/deployment-guide/pages/aliases.adoc    |   2 +-
 .../deployment-guide/pages/backup-restore.adoc     |   2 +-
 .../pages/cluster-node-management.adoc             |   2 +-
 .../monitoring-with-prometheus-and-grafana.adoc    |   2 +-
 .../modules/deployment-guide/pages/python.adoc     |   2 +-
 .../deployment-guide/pages/security-ui.adoc        |   2 +-
 .../getting-started/pages/introduction.adoc        |   2 +-
 .../getting-started/pages/tutorial-films.adoc      |   2 +-
 .../modules/indexing-guide/indexing-nav.adoc       |  81 +++++------
 .../indexing-guide/pages/analysis-screen.adoc      |   4 +-
 .../modules/indexing-guide/pages/analyzers.adoc    |   2 +-
 .../indexing-guide/pages/content-streams.adoc      |   4 +-
 .../modules/indexing-guide/pages/copy-fields.adoc  |   2 +-
 .../pages/currencies-exchange-rates.adoc           |   6 +-
 .../indexing-guide/pages/de-duplication.adoc       |   6 +-
 .../indexing-guide/pages/document-analysis.adoc    |  25 ++--
 .../indexing-guide/pages/documents-screen.adoc     |   8 +-
 .../modules/indexing-guide/pages/docvalues.adoc    |  14 +-
 .../pages/external-files-processes.adoc            |   6 +-
 .../pages/field-properties-by-use-case.adoc        |   8 +-
 .../field-type-definitions-and-properties.adoc     |  20 +--
 .../pages/field-types-included-with-solr.adoc      |  36 ++---
 .../modules/indexing-guide/pages/fields.adoc       |   4 +-
 .../modules/indexing-guide/pages/filters.adoc      |  38 ++---
 .../pages/indexing-nested-documents.adoc           |  43 +++---
 .../indexing-guide/pages/indexing-with-tika.adoc   |  12 +-
 .../pages/indexing-with-update-handlers.adoc       |  26 ++--
 .../indexing-guide/pages/language-analysis.adoc    |  57 ++++----
 .../indexing-guide/pages/language-detection.adoc   |   4 +-
 .../indexing-guide/pages/luke-request-handler.adoc |   2 +-
 .../pages/partial-document-updates.adoc            |  12 +-
 .../indexing-guide/pages/phonetic-matching.adoc    |  21 +--
 .../modules/indexing-guide/pages/post-tool.adoc    |   2 +-
 .../modules/indexing-guide/pages/reindexing.adoc   |  16 +-
 .../modules/indexing-guide/pages/schema-api.adoc   |  29 ++--
 .../pages/schema-browser-screen.adoc               |  10 +-
 .../indexing-guide/pages/schema-designer.adoc      |  29 ++--
 .../indexing-guide/pages/schema-elements.adoc      |  16 +-
 .../indexing-guide/pages/schemaless-mode.adoc      |  22 +--
 .../modules/indexing-guide/pages/tokenizers.adoc   |   6 +-
 .../transforming-and-indexing-custom-json.adoc     |   6 +-
 .../pages/analytics-expression-sources.adoc        |   8 +-
 .../pages/analytics-mapping-functions.adoc         |   4 +-
 .../pages/analytics-reduction-functions.adoc       |   8 +-
 .../modules/query-guide/pages/analytics.adoc       |  30 ++--
 .../query-guide/pages/block-join-query-parser.adoc |  16 +-
 .../pages/collapse-and-expand-results.adoc         |  14 +-
 .../query-guide/pages/common-query-parameters.adoc |  36 ++---
 .../query-guide/pages/dismax-query-parser.adoc     |   8 +-
 .../query-guide/pages/document-transformers.adoc   |  12 +-
 .../query-guide/pages/edismax-query-parser.adoc    |  16 +-
 .../query-guide/pages/exporting-result-sets.adoc   |  12 +-
 .../modules/query-guide/pages/faceting.adoc        |  24 +--
 .../query-guide/pages/function-queries.adoc        |  13 +-
 .../modules/query-guide/pages/graph-traversal.adoc |   6 +-
 .../modules/query-guide/pages/graph.adoc           |   2 +-
 .../modules/query-guide/pages/highlighting.adoc    |   6 +-
 .../modules/query-guide/pages/jdbc-zeppelin.adoc   |   2 +-
 .../query-guide/pages/join-query-parser.adoc       |   2 +-
 .../modules/query-guide/pages/json-facet-api.adoc  |  22 +--
 .../pages/json-faceting-domain-changes.adoc        |  18 +--
 .../modules/query-guide/pages/json-query-dsl.adoc  |  18 +--
 .../query-guide/pages/json-request-api.adoc        |   6 +-
 .../query-guide/pages/learning-to-rank.adoc        |  10 +-
 .../modules/query-guide/pages/local-params.adoc    |   2 +-
 .../modules/query-guide/pages/logs.adoc            |   4 +-
 .../query-guide/pages/machine-learning.adoc        |   4 +-
 .../query-guide/pages/math-expressions.adoc        |  92 ++++--------
 .../modules/query-guide/pages/math-start.adoc      |   2 +-
 .../modules/query-guide/pages/matrix-math.adoc     |   2 +-
 .../modules/query-guide/pages/morelikethis.adoc    |  18 +--
 .../modules/query-guide/pages/other-parsers.adoc   |  34 ++---
 .../query-guide/pages/pagination-of-results.adoc   |  10 +-
 .../pages/query-elevation-component.adoc           |   6 +-
 .../query-guide/pages/query-re-ranking.adoc        |  10 +-
 .../modules/query-guide/pages/query-screen.adoc    |  22 +--
 .../pages/query-syntax-and-parsers.adoc            |  32 ++--
 .../query-guide/pages/response-writers.adoc        |   8 +-
 .../query-guide/pages/result-clustering.adoc       |   2 +-
 .../modules/query-guide/pages/result-grouping.adoc |   8 +-
 .../modules/query-guide/pages/search-sample.adoc   |   6 +-
 .../pages/searching-nested-documents.adoc          |  23 +--
 .../modules/query-guide/pages/simulations.adoc     |   2 +-
 .../modules/query-guide/pages/spatial-search.adoc  |  16 +-
 .../modules/query-guide/pages/spell-checking.adoc  |   6 +-
 .../modules/query-guide/pages/sql-query.adoc       |  18 +--
 .../modules/query-guide/pages/sql-screen.adoc      |   2 +-
 .../query-guide/pages/standard-query-parser.adoc   |  15 +-
 .../modules/query-guide/pages/stats-component.adoc |   6 +-
 .../modules/query-guide/pages/stream-api.adoc      |   2 +-
 .../pages/stream-decorator-reference.adoc          |  22 +--
 .../pages/stream-evaluator-reference.adoc          |   2 +-
 .../modules/query-guide/pages/stream-screen.adoc   |   4 +-
 .../query-guide/pages/stream-source-reference.adoc |  10 +-
 .../query-guide/pages/streaming-expressions.adoc   |  16 +-
 .../modules/query-guide/pages/suggester.adoc       |   8 +-
 .../modules/query-guide/pages/tagger-handler.adoc  |   2 +-
 .../query-guide/pages/term-vector-component.adoc   |   2 +-
 .../modules/query-guide/pages/terms-component.adoc |   6 +-
 .../modules/query-guide/querying-nav.adoc          | 159 ++++++++++----------
 .../pages/major-changes-in-solr-6.adoc             |  40 +++--
 .../pages/major-changes-in-solr-7.adoc             | 162 ++++++++++++++-------
 .../pages/major-changes-in-solr-8.adoc             |  59 ++++----
 .../pages/major-changes-in-solr-9.adoc             |   8 +-
 .../upgrade-notes/pages/solr-upgrade-notes.adoc    |  23 +--
 .../modules/upgrade-notes/upgrade-nav.adoc         |   1 -
 solr/solr-ref-guide/package-lock.json              |   6 +-
 .../old-pages}/controlling-results.adoc            |   0
 .../pages => src/old-pages}/enhancing-queries.adoc |   0
 .../pages => src/old-pages}/field-types.adoc       |   0
 .../old-pages}/fields-and-schema-design.adoc       |   0
 .../old-pages}/indexing-data-operations.adoc       |   0
 .../pages => src/old-pages}/query-guide.adoc       |   0
 .../old-pages}/schema-indexing-guide.adoc          |   0
 .../pages => src/old-pages}/solr-schema.adoc       |   0
 118 files changed, 904 insertions(+), 892 deletions(-)
 rename solr/solr-ref-guide/{modules/query-guide/pages => src/old-pages}/controlling-results.adoc (100%)
 rename solr/solr-ref-guide/{modules/query-guide/pages => src/old-pages}/enhancing-queries.adoc (100%)
 rename solr/solr-ref-guide/{modules/indexing-guide/pages => src/old-pages}/field-types.adoc (100%)
 rename solr/solr-ref-guide/{modules/indexing-guide/pages => src/old-pages}/fields-and-schema-design.adoc (100%)
 rename solr/solr-ref-guide/{modules/indexing-guide/pages => src/old-pages}/indexing-data-operations.adoc (100%)
 rename solr/solr-ref-guide/{modules/query-guide/pages => src/old-pages}/query-guide.adoc (100%)
 rename solr/solr-ref-guide/{modules/indexing-guide/pages => src/old-pages}/schema-indexing-guide.adoc (100%)
 rename solr/solr-ref-guide/{modules/indexing-guide/pages => src/old-pages}/solr-schema.adoc (100%)

[solr] 01/04: Fix refs in indexing guide + cleanups + move 'pure nav' pages aside

Posted by ct...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ctargett pushed a commit to branch jira/solr-15556-antora
in repository https://gitbox.apache.org/repos/asf/solr.git

commit 420b96b943b88861f40c2c427f2c51fe60a217ea
Author: Cassandra Targett <ct...@apache.org>
AuthorDate: Thu Nov 25 12:53:08 2021 -0600

    Fix refs in indexing guide + cleanups + move 'pure nav' pages aside
---
 .../modules/deployment-guide/pages/aliases.adoc    |  2 +-
 .../deployment-guide/pages/backup-restore.adoc     |  2 +-
 .../pages/cluster-node-management.adoc             |  2 +-
 .../monitoring-with-prometheus-and-grafana.adoc    |  2 +-
 .../modules/deployment-guide/pages/python.adoc     |  2 +-
 .../deployment-guide/pages/security-ui.adoc        |  2 +-
 .../getting-started/pages/tutorial-films.adoc      |  2 +-
 .../modules/indexing-guide/indexing-nav.adoc       | 81 +++++++++++-----------
 .../indexing-guide/pages/analysis-screen.adoc      |  4 +-
 .../modules/indexing-guide/pages/analyzers.adoc    |  2 +-
 .../indexing-guide/pages/content-streams.adoc      |  4 +-
 .../modules/indexing-guide/pages/copy-fields.adoc  |  2 +-
 .../pages/currencies-exchange-rates.adoc           |  6 +-
 .../indexing-guide/pages/de-duplication.adoc       |  6 +-
 .../indexing-guide/pages/document-analysis.adoc    | 25 +++----
 .../indexing-guide/pages/documents-screen.adoc     |  8 +--
 .../modules/indexing-guide/pages/docvalues.adoc    | 14 ++--
 .../pages/external-files-processes.adoc            |  6 +-
 .../pages/field-properties-by-use-case.adoc        |  8 +--
 .../field-type-definitions-and-properties.adoc     | 20 +++---
 .../pages/field-types-included-with-solr.adoc      | 36 +++++-----
 .../modules/indexing-guide/pages/fields.adoc       |  4 +-
 .../modules/indexing-guide/pages/filters.adoc      | 38 +++++-----
 .../pages/indexing-nested-documents.adoc           | 43 +++++-------
 .../indexing-guide/pages/indexing-with-tika.adoc   | 12 ++--
 .../pages/indexing-with-update-handlers.adoc       | 26 ++++---
 .../indexing-guide/pages/language-analysis.adoc    | 57 +++++++--------
 .../indexing-guide/pages/language-detection.adoc   |  4 +-
 .../indexing-guide/pages/luke-request-handler.adoc |  2 +-
 .../pages/partial-document-updates.adoc            | 12 ++--
 .../indexing-guide/pages/phonetic-matching.adoc    | 21 +++---
 .../modules/indexing-guide/pages/post-tool.adoc    |  2 +-
 .../modules/indexing-guide/pages/reindexing.adoc   | 16 ++---
 .../modules/indexing-guide/pages/schema-api.adoc   | 29 ++++----
 .../pages/schema-browser-screen.adoc               | 10 +--
 .../indexing-guide/pages/schema-designer.adoc      | 29 ++++----
 .../indexing-guide/pages/schema-elements.adoc      | 16 ++---
 .../indexing-guide/pages/schemaless-mode.adoc      | 22 +++---
 .../modules/indexing-guide/pages/tokenizers.adoc   |  6 +-
 .../transforming-and-indexing-custom-json.adoc     |  6 +-
 .../pages => src/old-pages}/field-types.adoc       |  0
 .../old-pages}/fields-and-schema-design.adoc       |  0
 .../old-pages}/indexing-data-operations.adoc       |  0
 .../old-pages}/schema-indexing-guide.adoc          |  0
 .../pages => src/old-pages}/solr-schema.adoc       |  0
 45 files changed, 288 insertions(+), 303 deletions(-)

diff --git a/solr/solr-ref-guide/modules/deployment-guide/pages/aliases.adoc b/solr/solr-ref-guide/modules/deployment-guide/pages/aliases.adoc
index 6d16f6b..cea787c 100644
--- a/solr/solr-ref-guide/modules/deployment-guide/pages/aliases.adoc
+++ b/solr/solr-ref-guide/modules/deployment-guide/pages/aliases.adoc
@@ -72,7 +72,7 @@ WARNING: It's extremely important with all routed aliases that the route values
 Reindexing a document with a different route value for the same ID produces two distinct documents with the same ID accessible via the alias.
 All query time behavior of the routed alias is *_undefined_* and not easily predictable once duplicate IDs exist.
 
-CAUTION: It is a bad idea to use "data driven" mode (aka xref:configuration-guide:schemaless-mode.adoc[]) with routed aliases, as duplicate schema mutations might happen concurrently leading to errors.
+CAUTION: It is a bad idea to use "data driven" mode (aka xref:indexing-guide:schemaless-mode.adoc[]) with routed aliases, as duplicate schema mutations might happen concurrently leading to errors.
 
 
 === Time Routed Aliases
diff --git a/solr/solr-ref-guide/modules/deployment-guide/pages/backup-restore.adoc b/solr/solr-ref-guide/modules/deployment-guide/pages/backup-restore.adoc
index 53dadfc..a960749 100644
--- a/solr/solr-ref-guide/modules/deployment-guide/pages/backup-restore.adoc
+++ b/solr/solr-ref-guide/modules/deployment-guide/pages/backup-restore.adoc
@@ -659,7 +659,7 @@ An example configuration using the overall and GCS-client properties can be seen
 === S3BackupRepository
 
 Stores and retrieves backup files in an Amazon S3 bucket.
-This plugin must first be xref:solr-plugins.adoc#installing-plugins[installed] before using.
+This plugin must first be xref:configuration-guide:solr-plugins.adoc#installing-plugins[installed] before using.
 
 This plugin uses the https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/credentials.html[default AWS credentials provider chain], so ensure that your credentials are set appropriately (e.g., via env var, or in `~/.aws/credentials`, etc.).
 
diff --git a/solr/solr-ref-guide/modules/deployment-guide/pages/cluster-node-management.adoc b/solr/solr-ref-guide/modules/deployment-guide/pages/cluster-node-management.adoc
index d0e954f..4be6a66 100644
--- a/solr/solr-ref-guide/modules/deployment-guide/pages/cluster-node-management.adoc
+++ b/solr/solr-ref-guide/modules/deployment-guide/pages/cluster-node-management.adoc
@@ -601,7 +601,7 @@ The node to be removed.
 |Optional |Default: none
 |===
 +
-Request ID to track this action which will be xref:collections-api.adoc#asynchronous-calls[processed asynchronously].
+Request ID to track this action which will be xref:configuration-guide:collections-api.adoc#asynchronous-calls[processed asynchronously].
 
 [[addrole]]
 == ADDROLE: Add a Role
diff --git a/solr/solr-ref-guide/modules/deployment-guide/pages/monitoring-with-prometheus-and-grafana.adoc b/solr/solr-ref-guide/modules/deployment-guide/pages/monitoring-with-prometheus-and-grafana.adoc
index 28b5867..6834f2d 100644
--- a/solr/solr-ref-guide/modules/deployment-guide/pages/monitoring-with-prometheus-and-grafana.adoc
+++ b/solr/solr-ref-guide/modules/deployment-guide/pages/monitoring-with-prometheus-and-grafana.adoc
@@ -18,7 +18,7 @@
 
 If you use https://prometheus.io[Prometheus] and https://grafana.com[Grafana] for metrics storage and data visualization, Solr includes a Prometheus exporter to collect metrics and other data.
 
-A Prometheus exporter (`solr-exporter`) allows users to monitor not only Solr metrics which come from the xref:metrics-reporting.adoc#metrics-api[Metrics API], but also facet counts which come from xref:query-guide:facet.adoc[] and responses to xref:configuration-guide:collections-api.adoc[] commands and xref:ping.adoc[] requests.
+A Prometheus exporter (`solr-exporter`) allows users to monitor not only Solr metrics which come from the xref:metrics-reporting.adoc#metrics-api[Metrics API], but also facet counts which come from xref:query-guide:faceting.adoc[] and responses to xref:configuration-guide:collections-api.adoc[] commands and xref:ping.adoc[] requests.
 
 This graphic provides a more detailed view:
 
diff --git a/solr/solr-ref-guide/modules/deployment-guide/pages/python.adoc b/solr/solr-ref-guide/modules/deployment-guide/pages/python.adoc
index 10d2ca8..f2fbd3b 100644
--- a/solr/solr-ref-guide/modules/deployment-guide/pages/python.adoc
+++ b/solr/solr-ref-guide/modules/deployment-guide/pages/python.adoc
@@ -16,7 +16,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr includes an output format specifically for xref:query-guide:response-writers.adoc#python-response-writer[Python Response Writer], but the xref:response-writers.adoc#json-response-writer[JSON Response Writer] is a little more robust.
+Solr includes an output format specifically for xref:query-guide:response-writers.adoc#python-response-writer[Python Response Writer], but the xref:query-guide:response-writers.adoc#json-response-writer[JSON Response Writer] is a little more robust.
 
 == Simple Python
 
diff --git a/solr/solr-ref-guide/modules/deployment-guide/pages/security-ui.adoc b/solr/solr-ref-guide/modules/deployment-guide/pages/security-ui.adoc
index 2d5aefb..a5fa8a9 100644
--- a/solr/solr-ref-guide/modules/deployment-guide/pages/security-ui.adoc
+++ b/solr/solr-ref-guide/modules/deployment-guide/pages/security-ui.adoc
@@ -21,7 +21,7 @@ The Security screen allows administrators with the `security-edit` permission to
 The Security screen works with Solr running in cloud and standalone modes.
 
 .Security Screen
-image::solr-admin-ui/security.png[]
+image::getting-started:solr-admin-ui/security.png[]
 
 == Getting Started
 
diff --git a/solr/solr-ref-guide/modules/getting-started/pages/tutorial-films.adoc b/solr/solr-ref-guide/modules/getting-started/pages/tutorial-films.adoc
index 3a207d8..dd5e736 100644
--- a/solr/solr-ref-guide/modules/getting-started/pages/tutorial-films.adoc
+++ b/solr/solr-ref-guide/modules/getting-started/pages/tutorial-films.adoc
 @@ -36,7 +36,7 @@ When it's done start the second node, and tell it how to connect to ZooKeeper:
 
 `./bin/solr start -c -p 7574 -s example/cloud/node2/solr -z localhost:9983`
 
-NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see xref:zookeeper-ensemble#updating-solr-include-files[Updating Solr Include Files]) you can omit `-z <zk host string>` from the above command.
+NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see xref:deployment-guide:zookeeper-ensemble.adoc#updating-solr-include-files[Updating Solr Include Files]) you can omit `-z <zk host string>` from the above command.
 
 === Create a New Collection
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc b/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc
index 70bfa9f..a5fbe59 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/indexing-nav.adoc
@@ -1,46 +1,45 @@
 .Schema and Indexing Guide
-* xref:schema-indexing-guide.adoc[]
 
-** xref:solr-schema.adoc[]
-*** xref:schema-elements.adoc[]
-*** xref:schema-api.adoc[]
-*** xref:schemaless-mode.adoc[]
-*** xref:schema-designer.adoc[]
-*** xref:schema-browser-screen.adoc[]
+* Solr Schema
+** xref:schema-elements.adoc[]
+** xref:schema-api.adoc[]
+** xref:schemaless-mode.adoc[]
+** xref:schema-designer.adoc[]
+** xref:schema-browser-screen.adoc[]
 
-** xref:fields-and-schema-design.adoc[]
-*** xref:fields.adoc[]
-*** xref:field-types.adoc[]
-**** xref:field-type-definitions-and-properties.adoc[]
-**** xref:field-types-included-with-solr.adoc[]
-**** xref:currencies-exchange-rates.adoc[]
-**** xref:date-formatting-math.adoc[]
-**** xref:enum-fields.adoc[]
-**** xref:external-files-processes.adoc[]
-**** xref:field-properties-by-use-case.adoc[]
-*** xref:copy-fields.adoc[]
-*** xref:dynamic-fields.adoc[]
-*** xref:docvalues.adoc[]
-*** xref:luke-request-handler.adoc[]
+* Fields & Schema Design
+** xref:fields.adoc[]
+** Field Types
+*** xref:field-type-definitions-and-properties.adoc[]
+*** xref:field-types-included-with-solr.adoc[]
+*** xref:currencies-exchange-rates.adoc[]
+*** xref:date-formatting-math.adoc[]
+*** xref:enum-fields.adoc[]
+*** xref:external-files-processes.adoc[]
+*** xref:field-properties-by-use-case.adoc[]
+** xref:copy-fields.adoc[]
+** xref:dynamic-fields.adoc[]
+** xref:docvalues.adoc[]
+** xref:luke-request-handler.adoc[]
 
-** xref:document-analysis.adoc[]
-*** xref:analyzers.adoc[]
-*** xref:tokenizers.adoc[]
-*** xref:filters.adoc[]
-*** xref:charfilterfactories.adoc[]
-*** xref:language-analysis.adoc[]
-*** xref:phonetic-matching.adoc[]
-*** xref:analysis-screen.adoc[]
+* xref:document-analysis.adoc[]
+** xref:analyzers.adoc[]
+** xref:tokenizers.adoc[]
+** xref:filters.adoc[]
+** xref:charfilterfactories.adoc[]
+** xref:language-analysis.adoc[]
+** xref:phonetic-matching.adoc[]
+** xref:analysis-screen.adoc[]
 
-** xref:indexing-data-operations.adoc[]
-*** xref:indexing-with-update-handlers.adoc[]
-**** xref:transforming-and-indexing-custom-json.adoc[]
-*** xref:indexing-with-tika.adoc[]
-*** xref:indexing-nested-documents.adoc[]
-*** xref:post-tool.adoc[]
-*** xref:documents-screen.adoc[]
-*** xref:partial-document-updates.adoc[]
-*** xref:reindexing.adoc[]
-*** xref:language-detection.adoc[]
-*** xref:de-duplication.adoc[]
-*** xref:content-streams.adoc[]
+* Indexing & Data Operations
+** xref:indexing-with-update-handlers.adoc[]
+*** xref:transforming-and-indexing-custom-json.adoc[]
+** xref:indexing-with-tika.adoc[]
+** xref:indexing-nested-documents.adoc[]
+** xref:post-tool.adoc[]
+** xref:documents-screen.adoc[]
+** xref:partial-document-updates.adoc[]
+** xref:reindexing.adoc[]
+** xref:language-detection.adoc[]
+** xref:de-duplication.adoc[]
+** xref:content-streams.adoc[]
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/analysis-screen.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/analysis-screen.adoc
index bd92b79..2d78f2f 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/analysis-screen.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/analysis-screen.adoc
@@ -16,9 +16,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Once you've <<field-type-definitions-and-properties.adoc#,defined a field type in your Schema>>, and specified the analysis steps that you want applied to it, you should test it out to make sure that it behaves the way you expect it to.
+Once you've xref:field-type-definitions-and-properties.adoc[defined a field type in your Schema], and specified the analysis steps that you want applied to it, you should test it out to make sure that it behaves the way you expect it to.
 
-Luckily, there is a very handy page in the Solr <<solr-admin-ui.adoc#,admin interface>> that lets you do just that.
+Luckily, there is a very handy page in the Solr Admin UI that lets you do just that.
 You can invoke the analyzer for any text field, provide sample input, and display the resulting token stream.
 
 For example, let's look at some of the "Text" field types available in the `bin/solr -e techproducts` example configuration, and use the Analysis Screen (`\http://localhost:8983/solr/#/techproducts/analysis`) to compare how the tokens produced at index time for the sentence "Running an Analyzer" match up with a slightly different query text of "run my analyzer".
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/analyzers.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/analyzers.adoc
index 8d5deee..280f928 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/analyzers.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/analyzers.adoc
@@ -18,7 +18,7 @@
 
 An analyzer examines the text of fields and generates a token stream.
 
-Analyzers are specified as a child of the `<fieldType>` element in <<solr-schema.adoc#,Solr's schema>>.
+Analyzers are specified as a child of the `<fieldType>` element in xref:schema-elements.adoc[Solr's schema].
 
 In normal usage, only fields of type `solr.TextField` or `solr.SortableTextField` will specify an analyzer.
 The simplest way to configure an analyzer is with a single `<analyzer>` element whose class attribute is a fully qualified Java class name.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/content-streams.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/content-streams.adoc
index 9e87596..55411b0 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/content-streams.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/content-streams.adoc
@@ -55,7 +55,7 @@ In `solrconfig.xml`, you can enable it by changing the following `enableRemoteSt
 
 When `enableRemoteStreaming` is not specified in `solrconfig.xml`, the default behavior is to _not_ allow remote streaming (i.e., `enableRemoteStreaming="false"`).
 
-Remote streaming can also be enabled through the <<config-api.adoc#,Config API>> as follows:
+Remote streaming can also be enabled through the xref:configuration-guide:config-api.adoc[] as follows:
 
 [.dynamic-tabs]
 --
@@ -90,5 +90,5 @@ Gzip doesn't apply to `stream.body`.
 
 == Debugging Requests
 
-The implicit "dump" RequestHandler (see <<implicit-requesthandlers.adoc#,Implicit Request Handlers>>) simply outputs the contents of the Solr QueryRequest using the specified writer type `wt`.
+The implicit "dump" RequestHandler (see xref:configuration-guide:implicit-requesthandlers.adoc[]) simply outputs the contents of the Solr QueryRequest using the specified writer type `wt`.
 This is a useful tool to help understand what streams are available to the RequestHandlers.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/copy-fields.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/copy-fields.adoc
index 8883d49..93a130d 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/copy-fields.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/copy-fields.adoc
@@ -28,7 +28,7 @@ In the schema file, it's very simple to make copies of fields:
 ----
 
 In this example, we want Solr to copy the `cat` field to a field named `text`.
-Fields are copied before <<document-analysis.adoc#,analysis>> is done, meaning you can have two fields with identical original content, but which use different analysis chains and are stored in the index differently.
+Fields are copied before xref:document-analysis.adoc[analysis], meaning you can have two fields with identical original content, but which use different analysis chains and are stored in the index differently.
 
 In the example above, if the `text` destination field has data of its own in the input documents, the contents of the `cat` field will be added as additional values – just as if all of the values had originally been specified by the client.
 Remember to configure your fields as `multivalued="true"` if they will ultimately get multiple values (either from a multivalued source or from multiple `copyField` directives).
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/currencies-exchange-rates.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/currencies-exchange-rates.adoc
index 111acd5..7d089ac 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/currencies-exchange-rates.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/currencies-exchange-rates.adoc
@@ -35,7 +35,7 @@ The following features are supported:
 CurrencyField has been deprecated in favor of CurrencyFieldType; all configuration examples below use CurrencyFieldType.
 ====
 
-The `currency` field type is defined in the <<solr-schema.adoc#,schema>>.
+The `currency` field type is defined in the xref:schema-elements.adoc[schema].
 This is the default configuration of this type.
 
 [source,xml]
@@ -51,7 +51,7 @@ This is a file of exchange rates between our default currency to other currencie
 There is an alternate implementation that would allow regular downloading of currency data.
 See <<Exchange Rates>> below for more.
 
-Many of the example schemas that ship with Solr include a <<dynamic-fields.adoc#,dynamic field>> that uses this type, such as this example:
+Many of the example schemas that ship with Solr include a xref:dynamic-fields.adoc[dynamic field] that uses this type, such as this example:
 
 [source,xml]
 ----
@@ -83,7 +83,7 @@ The currency code field will use the `"*_s_ns"` dynamic field, which must exist
 .Atomic Updates won't work if dynamic sub-fields are stored
 [NOTE]
 ====
-As noted in <<partial-document-updates.adoc#field-storage,Atomic Update Field Storage>>, stored dynamic sub-fields will cause indexing to fail when you use Atomic Updates.
+As noted in xref:partial-document-updates.adoc#field-storage[Atomic Update Field Storage], stored dynamic sub-fields will cause indexing to fail when you use Atomic Updates.
 To avoid this problem, specify `stored="false"` on those dynamic fields.
 ====
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/de-duplication.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/de-duplication.adoc
index 183d22f..8d87504 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/de-duplication.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/de-duplication.adoc
@@ -40,11 +40,11 @@ When a document is added, a signature will automatically be generated and attach
 
 == Configuration Options
 
-There are two places in Solr to configure de-duplication: in `solrconfig.xml` and in the <<solr-schema.adoc#,schema>>.
+There are two places in Solr to configure de-duplication: in `solrconfig.xml` and in the xref:schema-elements.adoc[schema].
 
 === In solrconfig.xml
 
-The `SignatureUpdateProcessorFactory` has to be registered in `solrconfig.xml` as part of an <<update-request-processors.adoc#,Update Request Processor Chain>>, as in this example:
+The `SignatureUpdateProcessorFactory` has to be registered in `solrconfig.xml` as part of an xref:configuration-guide:update-request-processors.adoc[Update Request Processor Chain], as in this example:
 
 [source,xml]
 ----
@@ -125,7 +125,7 @@ There are 2 important things to keep in mind when using `SignatureUpdateProcesso
 
 . The `overwriteDupes=true` setting does not work _except_ in the special case of using the uniqueKey field as the `signatureField`.
 Attempting De-duplication on any other `signatureField` will not work correctly because of how updates are forwarded to replicas
-. When using the uniqueKey field as the `signatureField`, `SignatureUpdateProcessorFactory` must be run prior to the `<<update-request-processors.adoc#update-processors-in-solrcloud,DistributedUpdateProcessor>>` to ensure that documents can be routed to the correct shard leader based on the (generated) uniqueKey field.
+. When using the uniqueKey field as the `signatureField`, `SignatureUpdateProcessorFactory` must be run prior to the xref:configuration-guide:update-request-processors.adoc#update-processors-in-solrcloud[`DistributedUpdateProcessor`] to ensure that documents can be routed to the correct shard leader based on the (generated) uniqueKey field.
 
 (Using any other `signatureField` with `overwriteDupes=false` -- to generate a Signature for each document without De-duplication -- has no limitations.)
 ====
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/document-analysis.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/document-analysis.adoc
index e24ccaa..5b56449 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/document-analysis.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/document-analysis.adoc
@@ -1,11 +1,4 @@
 = Document Analysis in Solr
-:page-children: analyzers, \
-    tokenizers, \
-    filters, \
-    charfilterfactories, \
-    language-analysis, \
-    phonetic-matching, \
-    analysis-screen
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -26,11 +19,11 @@
 The following sections describe how Solr breaks down and works with textual data.
 There are three main concepts to understand: analyzers, tokenizers, and filters.
 
-* <<analyzers.adoc#,Field analyzers>> are used both during ingestion, when a document is indexed, and at query time.
+* xref:analyzers.adoc[Field analyzers] are used both during ingestion, when a document is indexed, and at query time.
 An analyzer examines the text of fields and generates a token stream.
 Analyzers may be a single class or they may be composed of a series of tokenizer and filter classes.
-* <<tokenizers.adoc#,Tokenizers>> break field data into lexical units, or _tokens_.
-* <<filters.adoc#,Filters>> examine a stream of tokens and keep them, transform or discard them, or create new ones.
+* xref:tokenizers.adoc[] break field data into lexical units, or _tokens_.
+* xref:filters.adoc[] examine a stream of tokens and keep them, transform or discard them, or create new ones.
 Tokenizers and filters may be combined to form pipelines, or _chains_, where the output of one is input to the next.
 Such a sequence of tokenizers and filters is called an _analyzer_ and the resulting output of an analyzer is used to match query results or build indices.
 
@@ -54,12 +47,12 @@ It also serves as a guide so that you can configure your own analysis classes if
 // tag::analysis-sections[]
 [cols="1,1",frame=none,grid=none,stripes=none]
 |===
-| <<analyzers.adoc#,Analyzers>>: Overview of Solr analyzers.
-| <<tokenizers.adoc#,Tokenizers>>: Tokenizers and tokenizer factory classes.
-| <<filters.adoc#,Filters>>: Filters and filter factory classes.
-| <<charfilterfactories.adoc#,CharFilterFactories>>: Filters for pre-processing input characters.
-| <<language-analysis.adoc#,Language Analysis>>: Tokenizers and filters for character set conversion and specific languages.
-| <<analysis-screen.adoc#,Analysis Screen>>: Admin UI for testing field analysis.
+| xref:analyzers.adoc[]: Overview of Solr analyzers.
+| xref:tokenizers.adoc[]: Tokenizers and tokenizer factory classes.
+| xref:filters.adoc[]: Filters and filter factory classes.
+| xref:charfilterfactories.adoc[]: Filters for pre-processing input characters.
+| xref:language-analysis.adoc[]: Tokenizers and filters for character set conversion and specific languages.
+| xref:analysis-screen.adoc[]: Admin UI for testing field analysis.
 |===
 // end::analysis-sections[]
 ****
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/documents-screen.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/documents-screen.adoc
index 22ca858..df17fbf 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/documents-screen.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/documents-screen.adoc
@@ -31,8 +31,8 @@ The screen allows you to:
 ====
 There are other ways to load data, see also these sections:
 
-* <<indexing-with-update-handlers.adoc#,Indexing with Update Handlers>>
-* <<indexing-with-tika.adoc#,Indexing with Solr Cell and Apache Tika>>
+* xref:indexing-with-update-handlers.adoc[]
+* xref:indexing-with-tika.adoc[]
 ====
 
 == Common Fields
@@ -44,7 +44,7 @@ The remaining parameters may change depending on the document type selected.
 * Document(s): Enter a properly-formatted Solr document corresponding to the `Document Type` selected.
 XML and JSON documents must be formatted in a Solr-specific format, a small illustrative document will be shown.
 CSV files should have headers corresponding to fields defined in the schema.
-More details can be found at: <<indexing-with-update-handlers.adoc#,Indexing with Update Handlers>>.
+More details can be found in xref:indexing-with-update-handlers.adoc[].
 * Commit Within: Specify the number of milliseconds between the time the document is submitted and when it is available for searching.
 * Overwrite: If `true` the new document will replace an existing document with the same value in the `id` field.
 If `false` multiple documents with the same id can be added.
@@ -74,7 +74,7 @@ If using `/update` for the Request-Handler option, you will be limited to XML, C
 Other document types (e.g., Word, PDF, etc.) can be indexed using the ExtractingRequestHandler (aka, Solr Cell).
 You must modify the RequestHandler to `/update/extract`, which must be defined in your `solrconfig.xml` file with your desired defaults.
 You should also add `&literal.id` shown in the "Extracting Request Handler Params" field so the file chosen is given a unique id.
-More information can be found at: <<indexing-with-tika.adoc#,Indexing with Solr Cell and Apache Tika>>.
+More information can be found in xref:indexing-with-tika.adoc[].
 
 == Solr Command
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/docvalues.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/docvalues.adoc
index 8a7be90..e56b6be 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/docvalues.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/docvalues.adoc
@@ -36,9 +36,9 @@ This approach promises to relieve some of the memory requirements of the fieldCa
 
 To use docValues, you only need to enable it for a field that you will use it with.
 As with all schema design, you need to define a field type and then define fields of that type with docValues enabled.
-All of these actions are done in the <<solr-schema.adoc#,schema>>.
+All of these actions are done in the xref:schema-elements.adoc[schema].
 
-Enabling a field for docValues only requires adding `docValues="true"` to the field (or field type) definition, as in this example from Solr's `sample_techproducts_configs` <<config-sets.adoc#,configset>>:
+Enabling a field for docValues only requires adding `docValues="true"` to the field (or field type) definition, as in this example from Solr's `sample_techproducts_configs` xref:configuration-guide:config-sets.adoc[configset]:
 
 [source,xml]
 ----
@@ -71,7 +71,7 @@ Entries are kept in sorted order and duplicates are removed.
 
 These Lucene types are related to how the {lucene-javadocs}/core/org/apache/lucene/index/DocValuesType.html[values are sorted and stored].
 
-There is an additional configuration option available, which is to modify the `docValuesFormat` <<field-type-definitions-and-properties.adoc#docvaluesformat,used by the field type>>.
+There is an additional configuration option available, which is to modify the xref:field-type-definitions-and-properties.adoc#docvaluesformat[`docValuesFormat`] used by the field type.
 The default implementation employs a mixture of loading some things into memory and keeping some on disk.
 In some cases, however, you may choose to specify an alternative {lucene-javadocs}/core/org/apache/lucene/codecs/DocValuesFormat.html[DocValuesFormat implementation].
 For example, you could choose to keep everything in memory by specifying `docValuesFormat="Direct"` on a field type:
@@ -91,16 +91,16 @@ If you choose to customize the `docValuesFormat` in your schema, upgrading to a
 
 === Sorting, Faceting & Functions
 
-If `docValues="true"` for a field, then DocValues will automatically be used any time the field is used for <<common-query-parameters.adoc#sort-parameter,sorting>>, <<faceting.adoc#,faceting>> or <<function-queries.adoc#,function queries>>.
+If `docValues="true"` for a field, then DocValues will automatically be used any time the field is used for xref:query-guide:common-query-parameters.adoc#sort-parameter[sorting], xref:query-guide:faceting.adoc[faceting], or xref:query-guide:function-queries.adoc[function queries].
 
 === Retrieving DocValues During Search
 
 Field values retrieved during search queries are typically returned from stored values.
 However, non-stored docValues fields will be also returned along with other stored fields when all fields (or pattern matching globs) are specified to be returned (e.g., "`fl=*`") for search queries depending on the effective value of the `useDocValuesAsStored` parameter for each field.
 For schema versions >= 1.6, the implicit default is `useDocValuesAsStored="true"`.
-See <<field-type-definitions-and-properties.adoc#,Field Type Definitions and Properties>> & <<fields.adoc#,Fields>> for more details.
+See xref:field-type-definitions-and-properties.adoc[] and xref:fields.adoc[] for more details.
 
-When `useDocValuesAsStored="false"`, non-stored DocValues fields can still be explicitly requested by name in the <<common-query-parameters.adoc#fl-field-list-parameter,`fl` parameter>>, but will not match glob patterns (`"*"`).
+When `useDocValuesAsStored="false"`, non-stored DocValues fields can still be explicitly requested by name in the xref:query-guide:common-query-parameters.adoc#fl-field-list-parameter[`fl` parameter], but will not match glob patterns (`"*"`).
 
 Returning DocValues along with "regular" stored fields at query time has performance implications that stored fields may not because DocValues are column-oriented and may therefore incur additional cost to retrieve for each returned document.
 
@@ -109,7 +109,7 @@ If you require the multi-valued fields to be returned in the original insertion
 
 In cases where the query is returning _only_ docValues fields performance may improve since returning stored fields requires disk reads and decompression whereas returning docValues fields in the fl list only requires memory access.
 
-When retrieving fields from their docValues form (such as when using the <<exporting-result-sets.adoc#,/export handler>>, <<streaming-expressions.adoc#,streaming expressions>> or if the field is requested in the `fl` parameter), two important differences between regular stored fields and docValues fields must be understood:
+When retrieving fields from their docValues form (such as when using the xref:query-guide:exporting-result-sets.adoc[/export handler], xref:query-guide:streaming-expressions.adoc[streaming expressions], or if the field is requested in the `fl` parameter), two important differences between regular stored fields and docValues fields must be understood:
 
 . Order is _not_ preserved.
 When retrieving stored fields, the insertion order is the return order.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/external-files-processes.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/external-files-processes.adoc
index 81175c3..7d4ade2 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/external-files-processes.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/external-files-processes.adoc
@@ -29,7 +29,7 @@ Another way to think of this is that, instead of specifying the field in documen
 ====
 External fields are not searchable.
 They can be used only for function queries or display.
-For more information on function queries, see the section on <<function-queries.adoc#,Function Queries>>.
+For more information on function queries, see the section on xref:query-guide:function-queries.adoc[].
 ====
 
 The `ExternalFileField` type is handy for cases where you want to update a particular field in many documents more often than you want to update the rest of the documents.
@@ -38,7 +38,7 @@ You might want to update the rank of all the documents daily or hourly, while th
 Without `ExternalFileField`, you would need to update each document just to change the rank.
 Using `ExternalFileField` is much more efficient because all document values for a particular field are stored in an external file that can be updated as frequently as you wish.
 
-In the <<solr-schema.adoc#,schema>>, the definition of this field type might look like this:
+In the xref:schema-elements.adoc[schema], the definition of this field type might look like this:
 
 [source,xml]
 ----
@@ -77,7 +77,7 @@ The file does not need to be sorted, but Solr will be able to perform the lookup
 === Reloading an External File
 
 It's possible to define an event listener to reload an external file when either a searcher is reloaded or when a new searcher is started.
-See the section <<caches-warming.adoc#query-related-listeners,Query-Related Listeners>> for more information, but a sample definition in `solrconfig.xml` might look like this:
+See the section xref:configuration-guide:caches-warming.adoc#query-related-listeners[Query-Related Listeners] for more information, but a sample definition in `solrconfig.xml` might look like this:
 
 [source,xml]
 ----
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/field-properties-by-use-case.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/field-properties-by-use-case.adoc
index 3b84c6b..e39169f 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/field-properties-by-use-case.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/field-properties-by-use-case.adoc
@@ -44,13 +44,13 @@ Notes:
 2. [[fpbuc_2,2]] Will be used if present, but not necessary.
 3. [[fpbuc_3,3]] (if termVectors=true)
 4. [[fpbuc_4,4]] A tokenizer must be defined for the field, but it doesn't need to be indexed.
-5. [[fpbuc_5,5]] Described in <<document-analysis.adoc#,Document Analysis in Solr>>.
+5. [[fpbuc_5,5]] Described in xref:document-analysis.adoc[].
 6. [[fpbuc_6,6]] Term vectors are not mandatory here.
 If not true, then a stored field is analyzed.
 So term vectors are recommended, but only required if `stored=false`.
 7. [[fpbuc_7,7]] For most field types, either `indexed` or `docValues` must be true, but both are not required.
-<<docvalues.adoc#,DocValues>> can be more efficient in many cases.
+xref:docvalues.adoc[] can be more efficient in many cases.
 For `[Int/Long/Float/Double/Date]PointFields`, `docValues=true` is required.
 8. [[fpbuc_8,8]] Stored content will be used by default, but docValues can alternatively be used.
-See <<docvalues.adoc#,DocValues>>.
-9. [[fpbuc_9,9]] Multi-valued sorting may be performed on docValues-enabled fields using the two-argument `field()` function, e.g., `field(myfield,min)`; see the <<function-queries.adoc#field-function,field() function in Function Queries>>.
+See xref:docvalues.adoc[].
+9. [[fpbuc_9,9]] Multi-valued sorting may be performed on docValues-enabled fields using the two-argument `field()` function, e.g., `field(myfield,min)`; see the xref:query-guide:function-queries.adoc#field-function[field() function in Function Queries].
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/field-type-definitions-and-properties.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/field-type-definitions-and-properties.adoc
index 7327b20..e415023 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/field-type-definitions-and-properties.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/field-type-definitions-and-properties.adoc
@@ -27,7 +27,7 @@ A field type definition can include four types of information:
 
 == Field Type Definitions in the Schema
 
-Field types are defined in the collection's <<solr-schema.adoc#,schema>>.
+Field types are defined in the collection's xref:schema-elements.adoc[schema].
 Each field type is defined between `fieldType` elements.
 They can optionally be grouped within a `types` element.
 
@@ -54,7 +54,7 @@ Here is an example of a field type definition for a type called `text_general`:
 ----
 
 <1> The first line in the example above contains the field type name, `text_general`, and the name of the implementing class, `solr.TextField`.
-<2> The rest of the definition is about field analysis, described in <<document-analysis.adoc#,Document Analysis in Solr>>.
+<2> The rest of the definition is about field analysis, described in xref:document-analysis.adoc[].
 
 The implementing class is responsible for making sure the field is handled correctly.
 In the class names, the string `solr` is shorthand for `org.apache.solr.schema` or `org.apache.solr.analysis`.
@@ -151,10 +151,10 @@ This blog post http://opensourceconnections.com/blog/2017/11/21/solr-synonyms-me
 |Optional |Default: `true`
 |===
 +
-For text fields, applicable when querying with <<standard-query-parser.adoc#standard-query-parser-parameters,`sow=false`>> (which is the default for the `sow` parameter).
-Use `true` for field types with query analyzers including graph-aware filters, e.g., <<filters.adoc#synonym-graph-filter,Synonym Graph Filter>> and <<filters.adoc#word-delimiter-graph-filter,Word Delimiter Graph Filter>>.
+For text fields, applicable when querying with xref:query-guide:standard-query-parser.adoc#standard-query-parser-parameters[`sow=false`] (the default).
+Use `true` for field types with query analyzers including graph-aware filters, e.g., xref:filters.adoc#synonym-graph-filter[Synonym Graph Filter] and xref:filters.adoc#word-delimiter-graph-filter[Word Delimiter Graph Filter].
 +
-Use `false` for field types with query analyzers including filters that can match docs when some tokens are missing, e.g., <<filters.adoc#shingle-filter,Shingle Filter>>.
+Use `false` for field types with query analyzers including filters that can match docs when some tokens are missing, e.g., xref:filters.adoc#shingle-filter[Shingle Filter].
 
 [[docvaluesformat]]
 `docValuesFormat`::
@@ -197,16 +197,16 @@ The table below includes the default value for most `FieldType` implementations
 |Property |Description |Implicit Default
 |`indexed` |If `true`, the value of the field can be used in queries to retrieve matching documents. |`true`
 |`stored` |If `true`, the actual value of the field can be retrieved by queries.  |`true`
-|`docValues` |If `true`, the value of the field will be put in a column-oriented <<docvalues.adoc#,DocValues>> structure. |`false`
+|`docValues` |If `true`, the value of the field will be put in a column-oriented xref:docvalues.adoc[] structure. |`false`
 |`sortMissingFirst`, `sortMissingLast` |Control the placement of documents when a sort field is not present. |`false`
 |`multiValued` |If `true`, indicates that a single document might contain multiple values for this field type. |`false`
-|`uninvertible` |If `true`, indicates that an `indexed="true" docValues="false"` field can be "un-inverted" at query time to build up large in memory data structure to serve in place of <<docvalues.adoc#,DocValues>>. *Defaults to `true` for historical reasons, but users are strongly encouraged to set this to `false` for stability and use `docValues="true"` as needed.* |`true`
+|`uninvertible` |If `true`, indicates that an `indexed="true" docValues="false"` field can be "un-inverted" at query time to build up a large in-memory data structure to serve in place of xref:docvalues.adoc[]. *Defaults to `true` for historical reasons, but users are strongly encouraged to set this to `false` for stability and use `docValues="true"` as needed.* |`true`
 |`omitNorms` |If `true`, omits the norms associated with this field (this disables length normalization for the field, and saves some memory). *Defaults to true for all primitive (non-analyzed) field types, such as int, float, data, bool, and string.* Only full-text fields or fields need norms. |*
 |`omitTermFreqAndPositions` |If `true`, omits term frequency, positions, and payloads from postings for this field. This can be a performance boost for fields that don't require that information. It also reduces the storage space required for the index. Queries that rely on position that are issued on a field with this option will silently fail to find documents. *This property defaults to true for all field types that are not text fields.* |*
 |`omitPositions` |Similar to `omitTermFreqAndPositions` but preserves term frequency information. |*
 |`termVectors`, `termPositions`, `termOffsets`, `termPayloads` |These options instruct Solr to maintain full term vectors for each document, optionally including position, offset, and payload information for each term occurrence in those vectors. These can be used to accelerate highlighting and other ancillary functionality, but impose a substantial cost in terms of index size. They are not necessary for typical uses of Solr. |`false`
 |`required` |Instructs Solr to reject any attempts to add a document which does not have a value for this field. This property defaults to false. |`false`
-|`useDocValuesAsStored` |If the field has <<docvalues.adoc#,docValues>> enabled, setting this to true would allow the field to be returned as if it were a stored field (even if it has `stored=false`) when matching "`*`" in an <<common-query-parameters.adoc#fl-field-list-parameter,fl parameter>>. |`true`
+|`useDocValuesAsStored` |If the field has xref:docvalues.adoc[] enabled, setting this to true would allow the field to be returned as if it were a stored field (even if it has `stored=false`) when matching "`*`" in an xref:query-guide:common-query-parameters.adoc#fl-field-list-parameter[fl parameter]. |`true`
 |`large` |Large fields are always lazy loaded and will only take up space in the document cache if the actual value is < 512KB. This option requires `stored="true"` and `multiValued="false"`. It's intended for fields that might have very large values so that they don't get cached in memory. |`false`
 |===
 
@@ -216,7 +216,7 @@ The table below includes the default value for most `FieldType` implementations
 
 For general numeric needs, consider using one of the `IntPointField`, `LongPointField`, `FloatPointField`, or `DoublePointField` classes, depending on the specific values you expect.
 These "Dimensional Point" based numeric classes use specially encoded data structures to support efficient range queries regardless of the size of the ranges used.
-Enable <<docvalues.adoc#,DocValues>> on these fields as needed for sorting and/or faceting.
+Enable xref:docvalues.adoc[] on these fields as needed for sorting and/or faceting.
 
 Some Solr features may not yet work with "Dimensional Points", in which case you may want to consider the equivalent `TrieIntField`, `TrieLongField`, `TrieFloatField`, and `TrieDoubleField` classes.
 These field types are deprecated and are likely to be removed in a future major Solr release, but they can still be used if necessary.
@@ -255,4 +255,4 @@ Finally, for faceting, use the primary author only via a `StrField`:
 A field type may optionally specify a `<similarity/>` that will be used when scoring documents that refer to fields with this type, as long as the "global" similarity for the collection allows it.
 
 By default, any field type which does not define a similarity, uses `BM25Similarity`.
-For more details, and examples of configuring both global & per-type Similarities, please see <<schema-elements.adoc#similarity,Schema Elements>>.
+For more details, and examples of configuring both global & per-type similarities, please see xref:schema-elements.adoc#similarity[Similarity].
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc
index 74268c0..0faba5d 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/field-types-included-with-solr.adoc
@@ -25,61 +25,61 @@ The {solr-javadocs}/core/org/apache/solr/schema/package-summary.html[`org.apache
 [%autowidth.stretch,options="header"]
 |===
 |Class |Description
-|BBoxField | Indexes a single rectangle (bounding box) per document field and supports searching via a bounding box. See the section <<spatial-search.adoc#,Spatial Search>> for more information.
+|BBoxField | Indexes a single rectangle (bounding box) per document field and supports searching via a bounding box. See the section xref:query-guide:spatial-search.adoc[] for more information.
 
 |BinaryField |Binary data.
 
 |BoolField |Contains either true or false. Values of `1`, `t`, or `T` in the first character are interpreted as `true`. Any other values in the first character are interpreted as `false`.
 
-|CollationField |Supports Unicode collation for sorting and range queries. The ICUCollationField is a better choice if you can use ICU4J. See the section <<language-analysis.adoc#unicode-collation,Unicode Collation>> for more information.
+|CollationField |Supports Unicode collation for sorting and range queries. The ICUCollationField is a better choice if you can use ICU4J. See the section xref:language-analysis.adoc#unicode-collation[Unicode Collation] for more information.
 
-|CurrencyFieldType |Supports currencies and exchange rates. See the section <<currencies-exchange-rates.adoc#,Currencies and Exchange Rates>> for more information.
+|CurrencyFieldType |Supports currencies and exchange rates. See the section xref:currencies-exchange-rates.adoc[] for more information.
 
-|DateRangeField |Supports indexing date ranges, to include point in time date instances as well (single-millisecond durations). See the section <<date-formatting-math.adoc#,Date Formatting and Date Math>> for more detail on using this field type. Consider using this field type even if it's just for date instances, particularly when the queries typically fall on UTC year/month/day/hour, etc., boundaries.
+|DateRangeField |Supports indexing date ranges, to include point in time date instances as well (single-millisecond durations). See the section xref:date-formatting-math.adoc[] for more detail on using this field type. Consider using this field type even if it's just for date instances, particularly when the queries typically fall on UTC year/month/day/hour, etc., boundaries.
 
-|DatePointField |Date field. Represents a point in time with millisecond precision, encoded using a "Dimensional Points" based data structure that allows for very efficient searches for specific values, or ranges of values. See the section <<date-formatting-math.adoc#,Working with Dates>> for more details on the supported syntax. For single valued fields, `docValues="true"` must be used to enable sorting.
+|DatePointField |Date field. Represents a point in time with millisecond precision, encoded using a "Dimensional Points" based data structure that allows for very efficient searches for specific values, or ranges of values. See the section xref:date-formatting-math.adoc[] for more details on the supported syntax. For single valued fields, `docValues="true"` must be used to enable sorting.
 
 |DoublePointField |Double field (64-bit IEEE floating point). This class encodes double values using a "Dimensional Points" based data structure that allows for very efficient searches for specific values, or ranges of values. For single valued fields, `docValues="true"` must be used to enable sorting.
 
-|ExternalFileField |Pulls values from a file on disk. See the section <<external-files-processes.adoc#,External Files and Processes>> for more information.
+|ExternalFileField |Pulls values from a file on disk. See the section xref:external-files-processes.adoc[] for more information.
 
-|EnumFieldType |Allows defining an enumerated set of values which may not be easily sorted by either alphabetic or numeric order (such as a list of severities, for example). This field type takes a configuration file, which lists the proper order of the field values. See the section <<enum-fields.adoc#,Enum Fields>> for more information.
+|EnumFieldType |Allows defining an enumerated set of values which may not be easily sorted by either alphabetic or numeric order (such as a list of severities, for example). This field type takes a configuration file, which lists the proper order of the field values. See the section xref:enum-fields.adoc[] for more information.
 
 |FloatPointField |Floating point field (32-bit IEEE floating point). This class encodes float values using a "Dimensional Points" based data structure that allows for very efficient searches for specific values, or ranges of values. For single valued fields, `docValues="true"` must be used to enable sorting.
 
-|ICUCollationField |Supports Unicode collation for sorting and range queries. See the section <<language-analysis.adoc#unicode-collation,Unicode Collation>> for more information.
+|ICUCollationField |Supports Unicode collation for sorting and range queries. See the section xref:language-analysis.adoc#unicode-collation[Unicode Collation] for more information.
 
 |IntPointField |Integer field (32-bit signed integer). This class encodes int values using a "Dimensional Points" based data structure that allows for very efficient searches for specific values, or ranges of values. For single valued fields, `docValues="true"` must be used to enable sorting.
 
-|LatLonPointSpatialField |A latitude/longitude coordinate pair; possibly multi-valued for multiple points. Usually it's specified as "lat,lon" order with a comma. See the section <<spatial-search.adoc#,Spatial Search>> for more information.
+|LatLonPointSpatialField |A latitude/longitude coordinate pair; possibly multi-valued for multiple points. Usually it's specified as "lat,lon" order with a comma. See the section xref:query-guide:spatial-search.adoc[] for more information.
 
 |LongPointField |Long field (64-bit signed integer). This class encodes foo values using a "Dimensional Points" based data structure that allows for very efficient searches for specific values, or ranges of values. For single valued fields, `docValues="true"` must be used to enable sorting.
 
-|NestPathField | Specialized field type storing ehanced information, when <<indexing-nested-documents.adoc#schema-configuration,working with nested documents>>.
+|NestPathField | Specialized field type storing enhanced information, when xref:indexing-nested-documents.adoc#schema-configuration[working with nested documents].
 
-|PointType |A single-valued n-dimensional point. It's both for sorting spatial data that is _not_ lat-lon, and for some more rare use-cases. (NOTE: this is _not_ related to the "Point" based numeric fields). See <<spatial-search.adoc#,Spatial Search>> for more information.
+|PointType |A single-valued n-dimensional point. It's both for sorting spatial data that is _not_ lat-lon, and for some more rare use-cases. (NOTE: this is _not_ related to the "Point" based numeric fields). See xref:query-guide:spatial-search.adoc[] for more information.
 
 |PreAnalyzedField |Provides a way to send to Solr serialized token streams, optionally with independent stored values of a field, and have this information stored and indexed without any additional text processing.
 
-Configuration and usage of PreAnalyzedField is documented in the section  <<external-files-processes.adoc#the-preanalyzedfield-type,The PreAnalyzedField Type>>.
+Configuration and usage of PreAnalyzedField is documented in the section xref:external-files-processes.adoc#the-preanalyzedfield-type[PreAnalyzedField Type].
 
 |RandomSortField |Does not contain a value. Queries that sort on this field type will return results in random order. Use a dynamic field to use this feature.
 
-|RankField |Can be used to store scoring factors to improve document ranking. To be used in combination with <<other-parsers.adoc#ranking-query-parser,RankQParserPlugin>>
+|RankField |Can be used to store scoring factors to improve document ranking. To be used in combination with xref:query-guide:other-parsers.adoc#ranking-query-parser[RankQParserPlugin].
 
-|RptWithGeometrySpatialField |A derivative of `SpatialRecursivePrefixTreeFieldType` that also stores the original geometry. See <<spatial-search.adoc#,Spatial Search>> for more information and usage with geospatial results transformer.
+|RptWithGeometrySpatialField |A derivative of `SpatialRecursivePrefixTreeFieldType` that also stores the original geometry. See xref:query-guide:spatial-search.adoc[] for more information and usage with geospatial results transformer.
 
-|SortableTextField |A specialized version of TextField that allows (and defaults to) `docValues="true"` for sorting on the first 1024 characters of the original string prior to analysis. The number of characters used for sorting can be overridden with the `maxCharsForDocValues` attribute. See <<common-query-parameters.adoc#sort-parameter,sort parameter discussion>> for details.
+|SortableTextField |A specialized version of TextField that allows (and defaults to) `docValues="true"` for sorting on the first 1024 characters of the original string prior to analysis. The number of characters used for sorting can be overridden with the `maxCharsForDocValues` attribute. See xref:query-guide:common-query-parameters.adoc#sort-parameter[sort parameter discussion] for details.
 
-|SpatialRecursivePrefixTreeFieldType |(RPT for short) Accepts latitude comma longitude strings or other shapes in WKT format. See <<spatial-search.adoc#,Spatial Search>> for more information.
+|SpatialRecursivePrefixTreeFieldType |(RPT for short) Accepts latitude comma longitude strings or other shapes in WKT format. See xref:query-guide:spatial-search.adoc[] for more information.
 
 |StrField |String (UTF-8 encoded string or Unicode). Strings are intended for small fields and are _not_ tokenized or analyzed in any way. They have a hard limit of slightly less than 32K.
 
-|TextField |Text, usually multiple words or tokens. In normal usage, only fields of type TextField or SortableTextField will specify an <<analyzers.adoc#,analyzer>>.
+|TextField |Text, usually multiple words or tokens. In normal usage, only fields of type TextField or SortableTextField will specify an xref:analyzers.adoc[analyzer].
 
 |UUIDField |Universally Unique Identifier (UUID). Pass in a value of `NEW` and Solr will create a new UUID.
 
-*Note*: configuring a UUIDField instance with a default value of `NEW` is not advisable for most users when using SolrCloud (and not possible if the UUID value is configured as the unique key field) since the result will be that each replica of each document will get a unique UUID value. Using <<update-request-processors.adoc#,UUIDUpdateProcessorFactory>> to generate UUID values when documents are added is recommended instead.
+*Note*: configuring a UUIDField instance with a default value of `NEW` is not advisable for most users when using SolrCloud (and not possible if the UUID value is configured as the unique key field) since the result will be that each replica of each document will get a unique UUID value. Using xref:configuration-guide:update-request-processors.adoc[UUIDUpdateProcessorFactory] to generate UUID values when documents are added is recommended instead.
 |===
 
 == Deprecated Field Types
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/fields.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/fields.adoc
index ce21946..064a3d2 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/fields.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/fields.adoc
@@ -16,7 +16,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Fields are defined in the fields element of a <<solr-schema.adoc#,schema>>.
+Fields are defined in the fields element of a xref:schema-elements.adoc[schema].
 Once you have the field types set up, defining the fields themselves is simple.
 
 == Example Field Definition
@@ -69,7 +69,7 @@ If this property is not specified, there is no default.
 
 Fields can have many of the same properties as field types.
 Properties from the table below which are specified on an individual field will override any explicit value for that property specified on the `fieldType` of the field, or any implicit default property value provided by the underlying `fieldType` implementation.
-The table below is reproduced from <<field-type-definitions-and-properties.adoc#,Field Type Definitions and Properties>>, which has more details:
+The table below is reproduced from xref:field-type-definitions-and-properties.adoc[], which has more details:
 
 --
 include::field-type-definitions-and-properties.adoc[tag=field-params]
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/filters.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/filters.adoc
index 95c56ae..9fd4b73 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/filters.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/filters.adoc
@@ -20,7 +20,7 @@ Filters examine a stream of tokens and keep them, transform them, or discard the
 
 == About Filters
 
-Like <<tokenizers.adoc#,tokenizers>>, filters consume input and produce a stream of tokens.
+Like xref:tokenizers.adoc[tokenizers], filters consume input and produce a stream of tokens.
 Filters also derive from `org.apache.lucene.analysis.TokenStream` but unlike tokenizers, a filter's input is another TokenStream.
 The job of a filter is usually easier than that of a tokenizer since in most cases a filter looks at each token in the stream sequentially and decides whether to pass it along, replace it, or discard it.
 
@@ -90,7 +90,7 @@ Solr includes several language-specific stemmers created by the http://snowball.
 The generic <<Snowball Porter Stemmer Filter>> can be used to configure any of these language stemmers.
 Solr also includes a convenience wrapper for the English Snowball stemmer.
 There are also several purpose-built stemmers for non-English languages.
-These stemmers are described in <<language-analysis.adoc#,Language Analysis>>.
+These stemmers are described in xref:language-analysis.adoc[].
 
 === Filters with Arguments
 
@@ -199,7 +199,7 @@ If `true`, the original token is preserved: "thé" -> "the", "thé"
 == Beider-Morse Filter
 
 Implements the Beider-Morse Phonetic Matching (BMPM) algorithm, which allows identification of similar names, even if they are spelled differently or in different languages.
-More information about how this works is available in the section on <<phonetic-matching.adoc#beider-morse-phonetic-matching-bmpm,Phonetic Matching>>.
+More information about how this works is available in the section xref:phonetic-matching.adoc#beider-morse-phonetic-matching-bmpm[Beider-Morse Phonetic Matching].
 
 [IMPORTANT]
 ====
@@ -284,7 +284,7 @@ The value `auto` will allow the filter to identify the language, or a comma-sepa
 
 == Classic Filter
 
-This filter takes the output of the <<tokenizers.adoc#classic-tokenizer,Classic Tokenizer>> and strips periods from acronyms and "'s" from possessives.
+This filter takes the output of the xref:tokenizers.adoc#classic-tokenizer[Classic Tokenizer] and strips periods from acronyms and "'s" from possessives.
 
 *Factory class:* `solr.ClassicFilterFactory`
 
@@ -328,9 +328,9 @@ This filter takes the output of the <<tokenizers.adoc#classic-tokenizer,Classic
 
 This filter for use in `index` time analysis creates word shingles by combining common tokens such as stop words with regular tokens.
 This can result in an index with more unique terms, but is useful for creating phrase queries containing common words, such as "the cat", in a way that will typically be much faster then if the combined tokens are not used, because only the term positions of documents containg both terms in sequence have to be considered.
-Correct usage requires being paired with <<#common-grams-query-filter,Common Grams Query Filter>> during `query` analysis.
+Correct usage requires being paired with <<Common Grams Query Filter>> during `query` analysis.
 
-These filters can also be combined with <<#stop-filter,Stop Filter>> so searching for `"the cat"` would match different documents then `"a cat"`, while pathological searches for either `"the"` or `"a"` would not match any documents.
+These filters can also be combined with <<Stop Filter>> so searching for `"the cat"` would match different documents then `"a cat"`, while pathological searches for either `"the"` or `"a"` would not match any documents.
 
 *Factory class:* `solr.CommonGramsFilterFactory`
 
@@ -409,18 +409,18 @@ If `true`, the filter ignores the case of words when comparing them to the commo
 
 == Common Grams Query Filter
 
-This filter is used for the `query` time analysis aspect of <<#common-grams-filter,Common Grams Filter>> -- see that filer for a description of arguments, example configuration, and sample input/output.
+This filter is used for the `query` time analysis aspect of <<Common Grams Filter>> -- see that filter for a description of arguments, example configuration, and sample input/output.
 
 == Collation Key Filter
 
 Collation allows sorting of text in a language-sensitive way.
 It is usually used for sorting, but can also be used with advanced searches.
-We've covered this in much more detail in the section on <<language-analysis.adoc#unicode-collation,Unicode Collation>>.
+We've covered this in much more detail in the section on xref:language-analysis.adoc#unicode-collation[Unicode Collation].
 
 == Daitch-Mokotoff Soundex Filter
 
 Implements the Daitch-Mokotoff Soundex algorithm, which allows identification of similar names, even if they are spelled differently.
-More information about how this works is available in the section on <<phonetic-matching.adoc#,Phonetic Matching>>.
+More information about how this works is available in the section on xref:phonetic-matching.adoc[].
 
 *Factory class:* `solr.DaitchMokotoffSoundexFilterFactory`
 
@@ -468,7 +468,7 @@ Setting this to `false` will enable phonetic matching, but the exact spelling of
 == Double Metaphone Filter
 
 This filter creates tokens using the http://commons.apache.org/proper/commons-codec/archives/{ivy-commons-codec-version}/apidocs/org/apache/commons/codec/language/DoubleMetaphone.html[`DoubleMetaphone`] encoding algorithm from commons-codec.
-For more information, see the <<phonetic-matching.adoc#,Phonetic Matching>> section.
+For more information, see xref:phonetic-matching.adoc[].
 
 *Factory class:* `solr.DoubleMetaphoneFilterFactory`
 
@@ -1050,7 +1050,7 @@ This filter is generally only useful at index time.
 This filter is a custom Unicode normalization form that applies the foldings specified in http://www.unicode.org/reports/tr30/tr30-4.html[Unicode TR #30: Character Foldings] in addition to the `NFKC_Casefold` normalization form as described in <<ICU Normalizer 2 Filter>>.
 This filter is a better substitute for the combined behavior of the <<ASCII Folding Filter>>, <<Lower Case Filter>>, and <<ICU Normalizer 2 Filter>>.
 
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 *Factory class:* `solr.ICUFoldingFilterFactory`
@@ -1191,7 +1191,7 @@ See the http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[Uni
 
 For detailed information about these normalization forms, see http://unicode.org/reports/tr15/[Unicode Normalization Forms].
 
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 == ICU Transform Filter
@@ -1244,7 +1244,7 @@ For a full list of ICU System Transforms, see http://demo.icu-project.org/icu-bi
 
 For detailed information about ICU Transforms, see http://userguide.icu-project.org/transforms/general.
 
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 == Keep Word Filter
@@ -1703,7 +1703,7 @@ All other characters are left unchanged.
 
 == Managed Stop Filter
 
-This is specialized version of the <<Stop Filter,Stop Words Filter Factory>> that uses a set of stop words that are <<managed-resources.adoc#,managed from a REST API.>>
+This is a specialized version of the <<Stop Filter,Stop Words Filter Factory>> that uses a set of stop words that are xref:configuration-guide:managed-resources.adoc[managed from a REST API].
 
 *Arguments:*
 
@@ -1750,7 +1750,7 @@ See <<Stop Filter>> for example input/output.
 
 == Managed Synonym Filter
 
-This is specialized version of the <<Synonym Filter>> that uses a mapping on synonyms that is <<managed-resources.adoc#,managed from a REST API.>>
+This is a specialized version of the <<Synonym Filter>> that uses a mapping on synonyms that is xref:configuration-guide:managed-resources.adoc[managed from a REST API].
 
 .Managed Synonym Filter has been Deprecated
 [WARNING]
@@ -1764,7 +1764,7 @@ For arguments and examples, see the <<Synonym Graph Filter>> below.
 
 == Managed Synonym Graph Filter
 
-This is specialized version of the <<Synonym Graph Filter>> that uses a mapping on synonyms that is <<managed-resources.adoc#,managed from a REST API.>>
+This is a specialized version of the <<Synonym Graph Filter>> that uses a mapping on synonyms that is xref:configuration-guide:managed-resources.adoc[managed from a REST API].
 
 This filter maps single- or multi-token synonyms, producing a fully correct graph output.
 This filter is a replacement for the Managed Synonym Filter, which produces incorrect graphs for multi-token synonyms.
@@ -2199,7 +2199,7 @@ Otherwise the token is passed through.
 == Phonetic Filter
 
 This filter creates tokens using one of the phonetic encoding algorithms in the `org.apache.commons.codec.language` package.
-For more information, see the section on <<phonetic-matching.adoc#,Phonetic Matching>>.
+For more information, see the section on xref:phonetic-matching.adoc[].
 
 *Factory class:* `solr.PhoneticFilterFactory`
 
@@ -3062,7 +3062,7 @@ s|Required |Default: none
 The path to a file that contains a list of synonyms, one per line.
 In the (default) `solr` format - see the `format` argument below for alternatives - blank lines and lines that begin with `\#` are ignored.
 This may be a comma-separated list of paths.
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 +
 There are two ways to specify synonym mappings:
 +
@@ -3429,7 +3429,7 @@ With the example below, for a token "example.com" with type `<URL>`, the token e
 == Type Token Filter
 
 This filter blacklists or whitelists a specified list of token types, assuming the tokens have type metadata associated with them.
-For example, the <<tokenizers.adoc#uax29-url-email-tokenizer,UAX29 URL Email Tokenizer>> emits "<URL>" and "<EMAIL>" typed tokens, as well as other types.
+For example, the xref:tokenizers.adoc#uax29-url-email-tokenizer[UAX29 URL Email Tokenizer] emits "<URL>" and "<EMAIL>" typed tokens, as well as other types.
 This filter would allow you to pull out only e-mail addresses from text as tokens, if you wish.
 
 *Factory class:* `solr.TypeTokenFilterFactory`
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-nested-documents.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-nested-documents.adoc
index 44dc993..799a4fc 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-nested-documents.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-nested-documents.adoc
@@ -16,23 +16,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr supports indexing nested documents, described here, and ways to <<searching-nested-documents.adoc#,search and retrieve>> them very efficiently.
+Solr supports indexing nested documents, described here, and ways to xref:query-guide:searching-nested-documents.adoc[search and retrieve] them very efficiently.
 
-By way of examples: nested documents in Solr can be used to bind a blog post (parent document)
-with comments (child documents) -- or as a way to model major product lines as parent documents,
+By way of examples: nested documents in Solr can be used to bind a blog post (parent document) with comments (child documents) -- or as a way to model major product lines as parent documents,
 with multiple types of child documents representing individual SKUs (with unique sizes / colors) and supporting documentation (either directly nested under the products, or under individual SKUs.
 
 The "top most" parent with all children is referred to as a "root" document or formerly "block
 document" and it explains some of the nomenclature of related features.
 
-At query time, the <<block-join-query-parser.adoc#,Block Join Query Parsers>> can search these relationships,
- and the `<<document-transformers.adoc#child-childdoctransformerfactory,[child]>>` Document Transformer can attach child (or other "descendent") documents to the result documents.
-In terms of performance, indexing the relationships between documents usually yields much faster queries than an equivalent "<<other-parsers#join-query-parser,query time join>>",
+At query time, the xref:query-guide:block-join-query-parser.adoc[] can search these relationships, and the xref:query-guide:document-transformers.adoc#child-childdoctransformerfactory[`[child]`] Document Transformer can attach child (or other "descendent") documents to the result documents.
+In terms of performance, indexing the relationships between documents usually yields much faster queries than an equivalent xref:query-guide:join-query-parser.adoc["query time join"],
  since the relationships are already stored in the index and do not need to be computed.
 
 However, nested documents are less flexible than query time joins as it imposes rules that some applications may not be able to accept.
-Nested documents may be indexed via either the XML or JSON data syntax, and is also supported by <<solrj.adoc#,SolrJ>> with javabin.
-
+Nested documents may be indexed via either the XML or JSON data syntax, and is also supported by xref:deployment-guide:solrj.adoc[] with javabin.
 
 [CAUTION]
 ====
@@ -125,7 +122,7 @@ There is no "child document" field type.
 
 [CAUTION]
 =====
-The <<indexing-with-update-handlers#json-update-convenience-paths,`/update/json/docs` convenience path>> will automatically flatten complex JSON documents by default -- so to index nested JSON documents make sure to use `/update`.
+The xref:indexing-with-update-handlers.adoc#json-update-convenience-paths[`/update/json/docs` convenience path] will automatically flatten complex JSON documents by default -- so to index nested JSON documents make sure to use `/update`.
 =====
 ====
 
@@ -232,13 +229,13 @@ Indexing nested documents _requires_ an indexed field named `\_root_`:
 ----
 
 *Do not add this field to an index that already has data!
-<<reindexing.adoc#changes-that-require-reindex,You must reindex.>>*
+xref:reindexing.adoc#changes-that-require-reindex[You must reindex].*
 
 * Solr automatically populates this field in _all_ documents with the `id` value of it's root document
 -- it's highest ancestor, possibly itself.
 * This field must be indexed (`indexed="true"`) but doesn't need to
 be either stored (`stored="true"`) or use doc values (`docValues="true"`), however you are free to do so if you find it useful.
-If you want to use `uniqueBlock(\_root_)` <<json-facet-api#stat-facet-functions,field type limitation>>, then you should enable docValues.
+If you want to use `uniqueBlock(\_root_)` xref:query-guide:json-facet-api.adoc#stat-facet-functions[field type limitation], then you should enable docValues.
 
 Preferably, you will also define `\_nest_path_` which adds features and ease-of-use:
 
@@ -250,11 +247,11 @@ Preferably, you will also define `\_nest_path_` which adds features and ease-of-
 
 * Solr automatically populates this field for any child document but not root documents.
 * This field enables Solr to properly record & reconstruct the named and nested relationship of documents
-when using the `<<searching-nested-documents.adoc#child-doc-transformer,[child]>>` doc transformer.
-** If this field does not exist, the `[child]` transformer will return all descendent child documents as a flattened list -- just as if they had been <<#indexing-anonymous-children,indexed as anonymous children>>.
+when using the xref:query-guide:searching-nested-documents.adoc#child-doc-transformer[`[child]`] doc transformer.
+** If this field does not exist, the `[child]` transformer will return all descendent child documents as a flattened list -- just as if they had been <<indexing-anonymous-children,indexed as anonymous children>>.
 * If you do not use `\_nest_path_` it is strongly recommended that every document have some
 field that differentiates root documents from their nested children -- and differentiates different "types" of child documents.
-This is not strictly necessary, so long as it's possible to write a "filter" query that can be used to isolate and select only parent documents for use in the <<block-join-query-parser#,block join query parsers>> and <<searching-nested-documents.adoc#child-doc-transformer,[child]>> doc transformer
+This is not strictly necessary, so long as it's possible to write a "filter" query that can be used to isolate and select only parent documents for use in the xref:query-guide:block-join-query-parser.adoc[] and xref:query-guide:searching-nested-documents.adoc#child-doc-transformer[`[child]`] doc transformer
 * It's possible to query on this field, although at present it's only documented how to in the
 context of `[child]`'s `childFilter` parameter.
 
@@ -279,17 +276,16 @@ documents.
 [TIP]
 ====
 When using SolrCloud it is a _VERY_ good idea to use
-<<solrcloud-shards-indexing.adoc#document-routing,prefix based compositeIds>> with a
+xref:deployment-guide:solrcloud-shards-indexing.adoc#document-routing[prefix based compositeIds] with a
 common prefix for all documents in the nested document tree.
 This makes it much easier to apply
-<<partial-document-updates#updating-child-documents,atomic updates to individual child documents>>
+xref:partial-document-updates.adoc#updating-child-documents[atomic updates] to individual child documents.
 ====
 
-
 == Maintaining Integrity with Updates and Deletes
 
-Nested document trees can be modified with Solr's
-<<partial-document-updates#updating-child-documents,atomic update>> feature to
+Nested document trees can be modified with
+xref:partial-document-updates.adoc#atomic-updates[atomic updates] to
 manipulate any document in a nested tree, and even to add new child documents.
 This aspect isn't different than updating any normal document -- Solr internally deletes the old
 nested document tree and it adds the newly modified one.
@@ -302,14 +298,11 @@ Clients should be very careful to *never* violate this.
 
 To delete an entire nested document tree, you can simply delete-by-ID using the `id` of the root document.
 Delete-by-ID will not work with the `id` of a child document, since only root document IDs are considered.
-Instead, use delete-by-query (most efficient) or <<partial-document-updates#,atomic updates>> to remove the child document from it's parent.
+Instead, use delete-by-query (most efficient) or xref:partial-document-updates.adoc#atomic-updates[atomic updates] to remove the child document from its parent.
 
 If you use Solr's delete-by-query APIs, you *MUST* be careful to ensure that any deletion query is structured to ensure no descendent children remain of any documents that are being deleted.
 *_Doing otherwise will violate integrity assumptions that Solr expects._*
 
-
-
-
 == Indexing Anonymous Children
 
 Although not recommended, it is also possible to index child documents "anonymously":
@@ -424,10 +417,10 @@ include::example$IndexingNestedDocuments.java[tag=anon-kids]
 This simplified approach was common in older versions of Solr, and can still be used with "Root-Only" schemas that do not contain any other nested related fields apart from `\_root_`.
 Many schemas in existence are this way simply because default configsets are this way, even if the application isn't using nested documents.
 
-This approach should *NOT* be used when schemas include a `\_nest_path_` field, as the existence of that field triggers assumptions and changes in behavior in various query time functionality, such as the <<searching-nested-documents.adoc#child-doc-transformer,[child]>>, that will not work when nested documents do not have any intrinsic "nested path" information.
+This approach should *NOT* be used when schemas include a `\_nest_path_` field, as the existence of that field triggers assumptions and changes in behavior in various query time functionality, such as xref:query-guide:searching-nested-documents.adoc#child-doc-transformer[`[child]`], that will not work when nested documents do not have any intrinsic "nested path" information.
 
 The results of indexing anonymous nested children with a "Root-Only" schema are similar to what happens if you attempt to index "pseudo field" nested documents using a "Root-Only" schema.
-Notably: since there is no nested path information for the <<searching-nested-documents.adoc#child-doc-transformer,[child]>> transformer to use to reconstruct the structure of a nest of documents, it returns all matching children as a flat list, similar in structure to how they were originally indexed:
+Notably: since there is no nested path information for the xref:query-guide:searching-nested-documents.adoc#child-doc-transformer[`[child]`] transformer to use to reconstruct the structure of a nest of documents, it returns all matching children as a flat list, similar in structure to how they were originally indexed:
 
 [.dynamic-tabs]
 --
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-tika.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-tika.adoc
index bb5a46d..025b9b8 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-tika.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-tika.adoc
@@ -47,7 +47,7 @@ You can configure which elements should be included/ignored, and which should ma
 * Solr Cell maps each piece of metadata onto a field.
 By default it maps to the same name but several parameters control how this is done.
 * When Solr Cell finishes creating the internal `SolrInputDocument`, the rest of the Lucene/Solr indexing stack takes over.
-The next step after any update handler is the <<update-request-processors.adoc#,Update Request Processor>> chain.
+The next step after any update handler is the xref:configuration-guide:update-request-processors.adoc[Update Request Processor] chain.
 
 Solr Cell is a contrib, which means it's not automatically included with Solr but must be configured.
 The example configsets have Solr Cell configured, but if you are not using those, you will want to pay attention to the section <<solrconfig.xml Configuration>> below.
@@ -70,7 +70,7 @@ that have a lot of rich media embedded in them.
 For these reasons, Solr Cell is not recommended for use in a production system.
 
 It is a best practice to use Solr Cell as a proof-of-concept tool during development and then run Tika as an external
-process that sends the extracted documents to Solr (via <<solrj.adoc#,SolrJ>>) for indexing.
+process that sends the extracted documents to Solr (via xref:deployment-guide:solrj.adoc[]) for indexing.
 This way, any extraction failures that occur are isolated from Solr itself and can be handled gracefully.
 
 For a few examples of how this could be done, see this blog post by Erick Erickson, https://lucidworks.com/2012/02/14/indexing-with-solrj/[Indexing with SolrJ].
@@ -405,7 +405,7 @@ Also see the section <<Defining XPath Expressions>> for an example.
 
 === solrconfig.xml Configuration
 
-If you have started Solr with one of the supplied <<config-sets.adoc#,example configsets>>, you may already have the `ExtractingRequestHandler` configured by default.
+If you have started Solr with one of the supplied xref:configuration-guide:config-sets.adoc[example configsets], you may already have the `ExtractingRequestHandler` configured by default.
 
 If it is not already configured, you will need to configure `solrconfig.xml` to find the `ExtractingRequestHandler` and its dependencies:
 
@@ -434,9 +434,9 @@ In this setup, all field names are lower-cased (with the `lowernames` parameter)
 
 [TIP]
 ====
-You may need to configure <<update-request-processors.adoc#,Update Request Processors>> (URPs) that parse numbers and dates and do other manipulations on the metadata fields generated by Solr Cell.
+You may need to configure xref:configuration-guide:update-request-processors.adoc[] (URPs) that parse numbers and dates and do other manipulations on the metadata fields generated by Solr Cell.
 
-In Solr's `_default` configset, <<schemaless-mode.adoc#,"schemaless">> (aka data driven, or field guessing) mode is enabled, which does a variety of such processing already.
+In Solr's `_default` configset, xref:schemaless-mode.adoc[schemaless mode] (aka data driven, or field guessing) is enabled, which does a variety of such processing already.
 
 If you instead explicitly define the fields for your schema, you can selectively specify the desired URPs.
 An easy way to specify this is to configure the parameter `processor` (under `defaults`) to `uuid,remove-blank,field-name-mutating,parse-boolean,parse-long,parse-double,parse-date`.
@@ -604,7 +604,7 @@ curl "http://localhost:8983/solr/gettingstarted/update/extract?literal.id=doc6&d
 == Using Solr Cell with SolrJ
 
 SolrJ is a Java client that you can use to add documents to the index, update the index, or query the index.
-You'll find more information on SolrJ in <<solrj.adoc#,SolrJ>>.
+You'll find more information on SolrJ in xref:deployment-guide:solrj.adoc[].
 
 Here's an example of using Solr Cell and SolrJ to add documents to a Solr index.
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-update-handlers.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-update-handlers.adoc
index 0ac0d32..22706e1 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-update-handlers.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-with-update-handlers.adoc
@@ -18,18 +18,16 @@
 // under the License.
 
 Update handlers are request handlers designed to add, delete and update documents to the index.
-In addition to having plugins for importing rich documents <<indexing-with-tika.adoc#,Indexing with Solr Cell and Apache Tika>>, Solr natively supports indexing structured documents in XML, CSV, and JSON.
+In addition to having plugins for importing rich documents xref:indexing-with-tika.adoc[], Solr natively supports indexing structured documents in XML, CSV, and JSON.
 
 The recommended way to configure and use request handlers is with path based names that map to paths in the request URL.
-However, request handlers can also be specified with the `qt` (query type) parameter if the <<requestdispatcher.adoc#,`requestDispatcher`>> is appropriately configured.
+However, request handlers can also be specified with the `qt` (query type) parameter if the xref:configuration-guide:requestdispatcher.adoc[`requestDispatcher`] is appropriately configured.
 It is possible to access the same handler using more than one name, which can be useful if you wish to specify different sets of default options.
 
-A single unified update request handler supports XML, CSV, JSON, and javabin update requests, delegating to the appropriate `ContentStreamLoader` based on the `Content-Type` of the <<content-streams.adoc#,ContentStream>>.
+A single unified update request handler supports XML, CSV, JSON, and javabin update requests, delegating to the appropriate `ContentStreamLoader` based on the `Content-Type` of the xref:content-streams.adoc[ContentStream].
 
 If you need to pre-process documents after they are loaded but before they are indexed (or even checked against the schema),
-Solr has document-preprocessing plugins for Update Request Handlers,
-called <<update-request-processors.adoc#,Update Request Processors>>,
-which allow for default and custom configuration chains.
+Solr has document preprocessing plugins for Update Request Handlers, called xref:configuration-guide:update-request-processors.adoc[], which allow for default and custom configuration chains.
 
 == UpdateRequestHandler Configuration
 
@@ -184,7 +182,7 @@ A single delete message can contain multiple delete operations.
 ====
 
 When using the Join query parser in a Delete By Query, you should use the `score` parameter with a value of "none" to avoid a `ClassCastException`.
-See the section on the <<other-parsers.adoc#,Join Query Parser>> for more details on the `score` parameter.
+See the section on the xref:query-guide:join-query-parser.adoc[] for more details on the `score` parameter.
 
 ====
 
@@ -250,7 +248,7 @@ This alternative `curl` command performs equivalent operations but with minimal
 curl http://localhost:8983/solr/my_collection/update -H "Content-Type: text/xml" -T "myfile.xml" -X POST
 ----
 
-Short requests can also be sent using a HTTP GET command, if enabled in <<requestdispatcher.adoc#requestparsers-element,`requestParsers`>> element of `solrconfig.xml`, URL-encoding the request, as in the following.
+Short requests can also be sent using a HTTP GET command, if enabled in xref:configuration-guide:requestdispatcher.adoc#requestparsers-element[`requestParsers`] element of `solrconfig.xml`, URL-encoding the request, as in the following.
 Note the escaping of "<" and ">":
 
 [source,bash]
@@ -275,9 +273,9 @@ The status field will be non-zero in case of failure.
 === Using XSLT to Transform XML Index Updates
 
 The Scripting contrib module provides a separate XSLT Update Request Handler that allows you to index any arbitrary XML by using the `<tr>` parameter to apply an https://en.wikipedia.org/wiki/XSLT[XSL transformation].
-You must have an XSLT stylesheet in the `conf/xslt` directory of your <<config-sets.adoc#,configset>> that can transform the incoming data to the expected `<add><doc/></add>` format, and use the `tr` parameter to specify the name of that stylesheet.
+You must have an XSLT stylesheet in the `conf/xslt` directory of your xref:configuration-guide:config-sets.adoc[configset] that can transform the incoming data to the expected `<add><doc/></add>` format, and use the `tr` parameter to specify the name of that stylesheet.
 
-Learn more about adding the `dist/solr-scripting-*.jar` file into Solr's <<libs.adoc#lib-directories,Lib Directories>>.
+Learn more about adding the `dist/solr-scripting-*.jar` file into Solr's xref:configuration-guide:libs.adoc#lib-directories[Lib Directories].
 
 === tr Parameter
 
@@ -287,7 +285,7 @@ The transformation must be found in the Solr `conf/xslt` directory.
 
 === XSLT Configuration
 
-The example below, from the `sample_techproducts_configs` <<config-sets.adoc#,configset>> in the Solr distribution, shows how the XSLT Update Request Handler is configured.
+The example below, from the `sample_techproducts_configs` xref:configuration-guide:config-sets.adoc[configset] in the Solr distribution, shows how the XSLT Update Request Handler is configured.
 
 [source,xml]
 ----
@@ -358,12 +356,12 @@ $ curl -o standard_solr_xml_format.xml "http://localhost:8983/solr/techproducts/
 $ curl -X POST -H "Content-Type: text/xml" -d @standard_solr_xml_format.xml "http://localhost:8983/solr/techproducts/update/xslt?commit=true&tr=updateXml.xsl"
 ----
 
-NOTE: You can see the opposite export/import cycle using the `tr` parameter in <<response-writers.adoc#xslt-writer-example,Response Writer XSLT example>>.
+NOTE: You can see the opposite export/import cycle using the `tr` parameter in the xref:query-guide:response-writers.adoc#xslt-writer-example[Response Writer XSLT example].
 
 == JSON Formatted Index Updates
 
 Solr can accept JSON that conforms to a defined structure, or can accept arbitrary JSON-formatted documents.
-If sending arbitrarily formatted JSON, there are some additional parameters that need to be sent with the update request, described in the section <<transforming-and-indexing-custom-json.adoc#,Transforming and Indexing Custom JSON>>.
+If sending arbitrarily formatted JSON, there are some additional parameters that need to be sent with the update request, described in the section xref:transforming-and-indexing-custom-json.adoc[].
 
 === Solr-Style JSON
 
@@ -507,7 +505,7 @@ The `/update/json` path may be useful for clients sending in JSON formatted upda
 === Custom JSON Documents
 
 Solr can support custom JSON.
-This is covered in the section <<transforming-and-indexing-custom-json.adoc#,Transforming and Indexing Custom JSON>>.
+This is covered in the section xref:transforming-and-indexing-custom-json.adoc[].
 
 
 == CSV Formatted Index Updates
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/language-analysis.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/language-analysis.adoc
index 676add4..10ca240 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/language-analysis.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/language-analysis.adoc
@@ -25,7 +25,7 @@ Tokens are delimited by white space and/or a relatively small set of punctuation
 In other languages the tokenization rules are often not so simple.
 Some European languages may also require special tokenization rules, such as rules for decompounding German words.
 
-For information about language detection at index time, see <<language-detection.adoc#,Language Detection>>.
+For information about language detection at index time, see xref:language-detection.adoc[].
 
 == KeywordMarkerFilterFactory
 
@@ -33,7 +33,7 @@ Protects words from being modified by stemmers.
 A customized protected word list may be specified with the "protected" attribute in the schema.
 Any words in the protected word list will not be modified by any stemmer in Solr.
 
-A sample Solr `protwords.txt` with comments can be found in the `sample_techproducts_configs` <<config-sets.adoc#,configset>> directory:
+A sample Solr `protwords.txt` with comments can be found in the `sample_techproducts_configs` xref:configuration-guide:config-sets.adoc[configset] directory:
 
 [.dynamic-tabs]
 --
@@ -186,7 +186,7 @@ s|Required |Default: none
 The path of a file that contains a list of simple words, one per line.
 Blank lines and lines that begin with "`#`" are ignored.
 +
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 
 `minWordSize`::
 +
@@ -269,7 +269,7 @@ Unicode Collation in Solr is fast, because all the work is done at index time.
 Rather than specifying an analyzer within `<fieldtype ... class="solr.TextField">`, the `solr.CollationField` and `solr.ICUCollationField` field type classes provide this functionality.
 `solr.ICUCollationField`, which is backed by http://site.icu-project.org[the ICU4J library], provides more flexible configuration, has more locales, is significantly faster, and requires less memory and less index space, since its keys are smaller than those produced by the JDK implementation that backs `solr.CollationField`.
 
-To use `solr.ICUCollationField`, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use `solr.ICUCollationField`, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 `solr.ICUCollationField` and `solr.CollationField` fields can be created in two ways:
@@ -695,13 +695,13 @@ On the other hand, it can reduce precision because language-specific character d
 
 The `lucene/analysis/opennlp` module provides OpenNLP integration via several analysis components: a tokenizer, a part-of-speech tagging filter, a phrase chunking filter, and a lemmatization filter.
 In addition to these analysis components, Solr also provides an update request processor to extract named entities.
-See also <<update-request-processors.adoc#update-processor-factories-that-can-be-loaded-as-plugins,Update Processor Factories That Can Be Loaded as Plugins>>.
+See also xref:configuration-guide:update-request-processors.adoc#update-processor-factories-that-can-be-loaded-as-plugins[Update Processor Factories That Can Be Loaded as Plugins].
 
 NOTE: The <<OpenNLP Tokenizer>> must be used with all other OpenNLP analysis components, for two reasons.
 First, the OpenNLP Tokenizer detects and marks the sentence boundaries required by all the OpenNLP filters.
 Second, since the pre-trained OpenNLP models used by these filters were trained using the corresponding language-specific sentence-detection/tokenization models, the same tokenization using the same models must be used at runtime for optimal performance.
 
-To use the OpenNLP components, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use the OpenNLP components, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 === OpenNLP Tokenizer
@@ -722,7 +722,7 @@ s|Required |Default: none
 |===
 +
 The path of a language-specific OpenNLP sentence detection model file.
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 
 `tokenizerModel`::
 +
@@ -732,7 +732,7 @@ s|Required |Default: none
 |===
 +
 The path of a language-specific OpenNLP tokenization model file.
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 
 *Example:*
 
@@ -783,11 +783,12 @@ s|Required |Default: none
 |===
 +
 The path of a language-specific OpenNLP POS tagger model file.
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 
 *Examples:*
 
-The OpenNLP tokenizer will tokenize punctuation, which is useful for following token filters, but ordinarily you don't want to include punctuation in your index, so the `TypeTokenFilter` (<<filters.adoc#type-token-filter,described here>>) is included in the examples below, with `stop.pos.txt` containing the following:
+The OpenNLP tokenizer will tokenize punctuation, which is useful for following token filters.
+Ordinarily you don't want to include punctuation in your index, so the xref:filters.adoc#type-token-filter[`TypeTokenFilter`] is included in the examples below, with `stop.pos.txt` containing the following:
 
 .stop.pos.txt
 [source,text]
@@ -839,7 +840,7 @@ Index the POS for each token as a payload:
 ====
 --
 
-Index the POS for each token as a synonym, after prefixing the POS with "@" (see the <<filters.adoc#type-as-synonym-filter,TypeAsSynonymFilter description>>):
+Index the POS for each token as a synonym, after prefixing the POS with "@" (see the xref:filters.adoc#type-as-synonym-filter[TypeAsSynonymFilter description]):
 
 [source,xml]
 ----
@@ -888,7 +889,7 @@ s|Required |Default: none
 |===
 +
 The path of a language-specific OpenNLP phrase chunker model file.
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 
 *Examples*:
 
@@ -928,7 +929,7 @@ Index the phrase chunk label for each token as a payload:
 ====
 --
 
-Index the phrase chunk label for each token as a synonym, after prefixing it with "#" (see the <<filters.adoc#type-as-synonym-filter,TypeAsSynonymFilter description>>):
+Index the phrase chunk label for each token as a synonym, after prefixing it with "#" (see the xref:filters.adoc#type-as-synonym-filter[TypeAsSynonymFilter description]):
 
 [source,xml]
 ----
@@ -963,7 +964,7 @@ Either `dictionary` or `lemmatizerModel` must be provided, and both may be provi
 |===
 +
 The path of a lemmatization dictionary file.
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 The dictionary file must be encoded as UTF-8, with one entry per line, in the form `word[tab]lemma[tab]part-of-speech`, e.g., `wrote[tab]write[tab]VBD`.
 
 `lemmatizerModel`::
@@ -974,7 +975,7 @@ The dictionary file must be encoded as UTF-8, with one entry per line, in the fo
 |===
 +
 The path of a language-specific OpenNLP lemmatizer model file.
-See <<resource-loading.adoc#,Resource Loading>> for more information.
+See xref:configuration-guide:resource-loading.adoc[] for more information.
 
 *Examples:*
 
@@ -1324,12 +1325,12 @@ The stemmer language, `Catalan` in this case.
 
 === Traditional Chinese
 
-The default configuration of the <<tokenizers.adoc#icu-tokenizer,ICU Tokenizer>> is suitable for Traditional Chinese text.
+The default configuration of the xref:tokenizers.adoc#icu-tokenizer[ICU Tokenizer] is suitable for Traditional Chinese text.
 It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
 
-<<tokenizers.adoc#standard-tokenizer,Standard Tokenizer>> can also be used to tokenize Traditional Chinese text.
+The xref:tokenizers.adoc#standard-tokenizer[Standard Tokenizer] can also be used to tokenize Traditional Chinese text.
 Following the Word Break rules from the Unicode Text Segmentation algorithm, it produces one token per Chinese character.
 When combined with <<CJK Bigram Filter>>, overlapping bigrams of Chinese characters are formed.
 
@@ -1377,7 +1378,7 @@ When combined with <<CJK Bigram Filter>>, overlapping bigrams of Chinese charact
 
 === CJK Bigram Filter
 
-Forms bigrams (overlapping 2-character sequences) of CJK characters that are generated from <<tokenizers.adoc#standard-tokenizer,Standard Tokenizer>> or <<tokenizers.adoc#icu-tokenizer,ICU Tokenizer>>.
+Forms bigrams (overlapping 2-character sequences) of CJK characters that are generated from the xref:tokenizers.adoc#standard-tokenizer[Standard Tokenizer] or the xref:tokenizers.adoc#icu-tokenizer[ICU Tokenizer].
 
 By default, all CJK characters produce bigrams, but finer grained control is available by specifying orthographic type arguments `han`, `hiragana`, `katakana`, and `hangul`.
 When set to `false`, characters of the corresponding type will be passed through as unigrams, and will not be included in any bigrams.
@@ -1440,12 +1441,12 @@ See the example under <<Traditional Chinese>>.
 
 For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the <<HMM Chinese Tokenizer>>.
 This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
 
-The default configuration of the <<tokenizers.adoc#icu-tokenizer,ICU Tokenizer>> is also suitable for Simplified Chinese text.
+The default configuration of the xref:tokenizers.adoc#icu-tokenizer[ICU Tokenizer] is also suitable for Simplified Chinese text.
 It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
 
 Also useful for Chinese analysis:
@@ -1503,7 +1504,7 @@ Also useful for Chinese analysis:
 
 For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the `solr.HMMChineseTokenizerFactory` in the `analysis-extras` contrib module.
 This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 *Factory class:* `solr.HMMChineseTokenizerFactory`
@@ -2346,7 +2347,7 @@ Removes terms with one of the configured parts-of-speech.
 |===
 +
 Filename for a list of parts-of-speech for which to remove terms.
-See `conf/lang/stoptags_ja.txt` in the `sample_techproducts_config` <<config-sets.adoc#,configset>> for an example.
+See `conf/lang/stoptags_ja.txt` in the `sample_techproducts_config` xref:configuration-guide:config-sets.adoc[configset] for an example.
 
 ==== Japanese Katakana Stem Filter
 
@@ -2550,10 +2551,10 @@ This filter replaces term text with the Reading Attribute, the Hangul transcript
 === Hebrew, Lao, Myanmar, Khmer
 
 Lucene provides support, in addition to UAX#29 word break rules, for Hebrew's use of the double and single quote characters, and for segmenting Lao, Myanmar, and Khmer into syllables with the `solr.ICUTokenizerFactory` in the `analysis-extras` contrib module.
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
-See <<tokenizers.adoc#icu-tokenizer,the ICUTokenizer>> for more information.
+See xref:tokenizers.adoc#icu-tokenizer[ICUTokenizer] for more information.
 
 === Latvian
 
@@ -2825,7 +2826,7 @@ Solr includes support for normalizing Persian, and Lucene includes an example st
 
 Solr provides support for Polish stemming with the `solr.StempelPolishStemFilterFactory`, and `solr.MorphologikFilterFactory` for lemmatization, in the `contrib/analysis-extras` module.
 The `solr.StempelPolishStemFilterFactory` component includes an algorithmic stemmer with tables for Polish.
-To use either of these filters, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use either of these filters, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 *Factory class:* `solr.StempelPolishStemFilterFactory` and `solr.MorfologikFilterFactory`
@@ -3373,7 +3374,7 @@ Solr includes support for stemming Turkish with the `solr.SnowballPorterFilterFa
 === Ukrainian
 
 Solr provides support for Ukrainian lemmatization with the `solr.MorphologikFilterFactory`, in the `contrib/analysis-extras` module.
-To use this filter, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this filter, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See `solr/contrib/analysis-extras/README.md` for instructions on which jars you need to add.
 
 Lucene also includes an example Ukrainian stopword list, in the `lucene-analyzers-morfologik` jar.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/language-detection.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/language-detection.adoc
index 7318e83..68122c1 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/language-detection.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/language-detection.adoc
@@ -29,7 +29,7 @@ In general, the LangDetect implementation supports more languages with higher pe
 
 For specific information on each of these language identification implementations, including a list of supported languages for each, see the relevant project websites.
 
-For more information about language analysis in Solr, see <<language-analysis.adoc#,Language Analysis>>.
+For more information about language analysis in Solr, see xref:language-analysis.adoc[].
 
 == Configuring Language Detection
 
@@ -94,7 +94,7 @@ An OpenNLP language detection model.
 The OpenNLP project provides a pre-trained 103 language model on the http://opennlp.apache.org/models.html[OpenNLP site's model download page].
 Model training instructions are provided on the http://opennlp.apache.org/docs/{ivy-opennlp-version}/manual/opennlp.html#tools.langdetect[OpenNLP website].
 +
-See <<resource-loading.adoc#,Resource Loading>> for information on where to put the model.
+See xref:configuration-guide:resource-loading.adoc[] for information on where to put the model.
 
 ==== OpenNLP Language Codes
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/luke-request-handler.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/luke-request-handler.adoc
index 7d3734b..bb0987d 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/luke-request-handler.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/luke-request-handler.adoc
@@ -16,7 +16,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The Luke Request Handler offers programmatic access to the information provided on the <<schema-browser-screen#schema-browser-screen,Schema Browser>> page of the Admin UI.
+The Luke Request Handler offers programmatic access to the information provided on the xref:schema-browser-screen.adoc[] page of the Admin UI.
 It is modeled after Luke, the Lucene Index Browser by Andrzej Bialecki.
 It is an implicit handler, so you don't need to define it in `solrconfig.xml`.
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/partial-document-updates.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/partial-document-updates.adoc
index 9e601f8..3aee891 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/partial-document-updates.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/partial-document-updates.adoc
@@ -139,13 +139,13 @@ In-place updates avoid that.
 .Routing Updates using child document Ids in SolrCloud
 
 When SolrCloud receives document updates, the
-<<solrcloud-shards-indexing.adoc#document-routing,document routing>> rules for the collection is used to determine which shard should process the update based on the `id` of the document.
+xref:deployment-guide:solrcloud-shards-indexing.adoc#document-routing[document routing] rules for the collection are used to determine which shard should process the update based on the `id` of the document.
 
 When sending an update that specifies the `id` of a _child document_ this will not work by default: the correct shard to send the document to is based on the `id` of the "Root" document for the block the child document is in, *not* the `id` of the child document being updated.
 
 Solr offers two solutions to address this:
 
-* Clients may specify a <<solrcloud-shards-indexing.adoc#document-routing,`\_route_` parameter>>, with the `id` of the Root document as the parameter value, on each update to tell Solr which shard should process the update.
+* Clients may specify a xref:deployment-guide:solrcloud-shards-indexing.adoc#document-routing[`\_route_` parameter], with the `id` of the Root document as the parameter value, on each update to tell Solr which shard should process the update.
 * Clients can use the (default) `compositeId` router's "prefix routing" feature when indexing all documents to ensure that all child/descendent documents in a Block use the same `id` prefix as the Root level document.
 This will cause Solr's default routing logic to automatically send child document updates to the correct shard.
 
@@ -159,13 +159,13 @@ equivalent, but it may be absent or not equivalent (e.g., when using the `implic
 All of the examples below use `id` prefixes, so no `\_route_` parameter will be necessary for these examples.
 ====
 
-For the upcoming examples, we'll assume an index containing the same documents covered in <<indexing-nested-documents#example-indexing-syntax,Indexing Nested Documents>>:
+For the upcoming examples, we'll assume an index containing the same documents covered in xref:indexing-nested-documents.adoc#example-indexing-syntax[Indexing Nested Documents]:
 
 include::indexing-nested-documents.adoc[tag=sample-indexing-deeply-nested-documents]
 
 ==== Modifying Child Document Fields
 
-All of the <<#atomic-updates,Atomic Update operations>> mentioned above are supported for "real" fields of Child Documents:
+All of the <<atomic-updates,Atomic Update operations>> mentioned above are supported for "real" fields of Child Documents:
 
 [source,bash]
 ----
@@ -345,7 +345,7 @@ Use the parameter `failOnVersionConflicts=false` to avoid failure of the entire
 If the document being updated does not include the `\_version_` field, and atomic updates are not being used, the document will be treated by normal Solr rules, which is usually to discard the previous version.
 
 When using Optimistic Concurrency, clients can include an optional `versions=true` request parameter to indicate that the _new_ versions of the documents being added should be included in the response.
-This allows clients to immediately know what the `\_version_` is of every document added without needing to make a redundant <<realtime-get.adoc#,`/get` request>>.
+This allows clients to immediately know what the `\_version_` is of every document added without needing to make a redundant xref:configuration-guide:realtime-get.adoc[`/get` request].
 
 Following are some examples using `versions=true` in queries:
 
@@ -487,7 +487,7 @@ Optimistic Concurrency is extremely powerful, and works very efficiently because
 However, in some situations users may want to configure their own document specific version field, where the version values are assigned on a per-document basis by an external system, and have Solr reject updates that attempt to replace a document with an "older" version.
 In situations like this the {solr-javadocs}/core/org/apache/solr/update/processor/DocBasedVersionConstraintsProcessorFactory.html[`DocBasedVersionConstraintsProcessorFactory`] can be useful.
 
-The basic usage of `DocBasedVersionConstraintsProcessorFactory` is to configure it in `solrconfig.xml` as part of the <<update-request-processors.adoc#update-request-processor-configuration,UpdateRequestProcessorChain>> and specify the name of your custom `versionField` in your schema that should be checked when validating updates:
+The basic usage of `DocBasedVersionConstraintsProcessorFactory` is to configure it in `solrconfig.xml` as part of the xref:configuration-guide:update-request-processors.adoc#update-request-processor-configuration[UpdateRequestProcessorChain] and specify the name of your custom `versionField` in your schema that should be checked when validating updates:
 
 [source,xml]
 ----
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/phonetic-matching.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/phonetic-matching.adoc
index 04ab0af..2f6fd13 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/phonetic-matching.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/phonetic-matching.adoc
@@ -22,7 +22,7 @@ For overviews of and comparisons between algorithms, see http://en.wikipedia.org
 
 == Beider-Morse Phonetic Matching (BMPM)
 
-For examples of how to use this encoding in your analyzer, see <<filters.adoc#beider-morse-filter,Beider Morse Filter>> in the Filter Descriptions section.
+For examples of how to use this encoding in your analyzer, see xref:filters.adoc#beider-morse-filter[Beider Morse Filter] in the Filter Descriptions section.
 
 Beider-Morse Phonetic Matching (BMPM) is a "soundalike" tool that lets you search using a new phonetic matching system.
 BMPM helps you search for personal names (or just surnames) in a Solr/Lucene index, and is far superior to the existing phonetic codecs, such as regular soundex, metaphone, caverphone, etc.
@@ -65,7 +65,7 @@ For more information, see here: http://stevemorse.org/phoneticinfo.htm and http:
 
 == Daitch-Mokotoff Soundex
 
-To use this encoding in your analyzer, see <<filters.adoc#daitch-mokotoff-soundex-filter,Daitch-Mokotoff Soundex Filter>> in the Filter Descriptions section.
+To use this encoding in your analyzer, see xref:filters.adoc#daitch-mokotoff-soundex-filter[Daitch-Mokotoff Soundex Filter] in the Filter Descriptions section.
 
 The Daitch-Mokotoff Soundex algorithm is a refinement of the Russel and American Soundex algorithms, yielding greater accuracy in matching especially Slavic and Yiddish surnames with similar pronunciation but differences in spelling.
 
@@ -82,15 +82,16 @@ For more information, see http://en.wikipedia.org/wiki/Daitch%E2%80%93Mokotoff_S
 
 == Double Metaphone
 
-To use this encoding in your analyzer, see <<filters.adoc#double-metaphone-filter,Double Metaphone Filter>> in the Filter Descriptions section.
-Alternatively, you may specify `encoder="DoubleMetaphone"` with the <<filters.adoc#phonetic-filter,Phonetic Filter>>, but note that the Phonetic Filter version will *not* provide the second ("alternate") encoding that is generated by the Double Metaphone Filter for some tokens.
+To use this encoding in your analyzer, see xref:filters.adoc#double-metaphone-filter[Double Metaphone Filter] in the Filter Descriptions section.
+
+Alternatively, you may specify `encoder="DoubleMetaphone"` with the xref:filters.adoc#phonetic-filter[Phonetic Filter], but note that the Phonetic Filter version will *not* provide the second ("alternate") encoding that is generated by the Double Metaphone Filter for some tokens.
 
 Encodes tokens using the double metaphone algorithm by Lawrence Philips.
 See the original article at http://www.drdobbs.com/the-double-metaphone-search-algorithm/184401251?pgno=2
 
 == Metaphone
 
-To use this encoding in your analyzer, specify `encoder="Metaphone"` with the <<filters.adoc#phonetic-filter,Phonetic Filter>>.
+To use this encoding in your analyzer, specify `encoder="Metaphone"` with the xref:filters.adoc#phonetic-filter[Phonetic Filter].
 
 Encodes tokens using the Metaphone algorithm by Lawrence Philips, described in "Hanging on the Metaphone" in Computer Language, Dec. 1990.
 
@@ -99,7 +100,7 @@ Another reference for more information is http://www.drdobbs.com/the-double-meta
 
 == Soundex
 
-To use this encoding in your analyzer, specify `encoder="Soundex"` with the <<filters.adoc#phonetic-filter,Phonetic Filter>>.
+To use this encoding in your analyzer, specify `encoder="Soundex"` with the xref:filters.adoc#phonetic-filter[Phonetic Filter].
 
 Encodes tokens using the Soundex algorithm, which is used to relate similar names, but can also be used as a general purpose scheme to find words with similar phonemes.
 
@@ -107,7 +108,7 @@ See also http://en.wikipedia.org/wiki/Soundex.
 
 == Refined Soundex
 
-To use this encoding in your analyzer, specify `encoder="RefinedSoundex"` with the <<filters.adoc#phonetic-filter,Phonetic Filter>>.
+To use this encoding in your analyzer, specify `encoder="RefinedSoundex"` with the xref:filters.adoc#phonetic-filter[Phonetic Filter].
 
 Encodes tokens using an improved version of the Soundex algorithm.
 
@@ -115,7 +116,7 @@ See http://en.wikipedia.org/wiki/Soundex.
 
 == Caverphone
 
-To use this encoding in your analyzer, specify `encoder="Caverphone"` with the <<filters.adoc#phonetic-filter,Phonetic Filter>>.
+To use this encoding in your analyzer, specify `encoder="Caverphone"` with the xref:filters.adoc#phonetic-filter[Phonetic Filter].
 
 Caverphone is an algorithm created by the Caversham Project at the University of Otago.
 The algorithm is optimised for accents present in the southern part of the city of Dunedin, New Zealand.
@@ -124,7 +125,7 @@ See http://en.wikipedia.org/wiki/Caverphone and the Caverphone 2.0 specification
 
 == Kölner Phonetik a.k.a. Cologne Phonetic
 
-To use this encoding in your analyzer, specify `encoder="ColognePhonetic"` with the <<filters.adoc#phonetic-filter,Phonetic Filter>>.
+To use this encoding in your analyzer, specify `encoder="ColognePhonetic"` with the xref:filters.adoc#phonetic-filter[Phonetic Filter].
 
 The Kölner Phonetik, an algorithm published by Hans Joachim Postel in 1969, is optimized for the German language.
 
@@ -132,7 +133,7 @@ See http://de.wikipedia.org/wiki/K%C3%B6lner_Phonetik
 
 == NYSIIS
 
-To use this encoding in your analyzer, specify `encoder="Nysiis"` with the <<filters.adoc#phonetic-filter,Phonetic Filter>>.
+To use this encoding in your analyzer, specify `encoder="Nysiis"` with the xref:filters.adoc#phonetic-filter[Phonetic Filter].
 
 NYSIIS is an encoding used to relate similar names, but can also be used as a general purpose scheme to find words with similar phonemes.
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/post-tool.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/post-tool.adoc
index d02facb..f2d303b 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/post-tool.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/post-tool.adoc
@@ -121,7 +121,7 @@ bin/post -c signals -params "separator=%09" -type text/csv data.tsv
 ----
 
 The content type (`-type`) parameter is required to treat the file as the proper type, otherwise it will be ignored and a WARNING logged as it does not know what type of content a .tsv file is.
-The <<indexing-with-update-handlers.adoc#csv-formatted-index-updates,CSV handler>> supports the `separator` parameter, and is passed through using the `-params` setting.
+The xref:indexing-with-update-handlers.adoc#csv-formatted-index-updates[CSV handler] supports the `separator` parameter, and is passed through using the `-params` setting.
 
 === Indexing JSON
 
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/reindexing.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/reindexing.adoc
index 070d50c..7b0bf11 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/reindexing.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/reindexing.adoc
@@ -27,7 +27,7 @@ It is strongly recommended that Solr users have a consistent, repeatable process
 
 [CAUTION]
 ====
-Re-ingesting all the documents in your corpus without first insuring that all documents and Lucene segments have been deleted is *not* sufficient, see the section <<reindexing.adoc#reindexing-strategies,Reindexing Strategies>>.
+Re-ingesting all the documents in your corpus without first ensuring that all documents and Lucene segments have been deleted is *not* sufficient, see the section <<Reindexing Strategies>>.
 ====
 
 Reindexing is recommended during major upgrades, so in addition to covering what types of configuration changes should trigger a reindex, this section will also cover strategies for reindexing.
@@ -57,7 +57,7 @@ This type of change is usually only made during or because of a major upgrade.
 When you change your schema by adding fields, removing fields, or changing the field or field type definitions you generally do so with the intent that those changes alter how documents are searched.
 The full effects of those changes are not reflected in the corpus as a whole until all documents are reindexed.
 
-Changes to *any* field/field type property described in <<field-type-definitions-and-properties.adoc#field-type-properties,Field Type Properties>> must be reindexed in order for the change to be reflected in _all_ documents.
+Changes to *any* field/field type property described in xref:field-type-definitions-and-properties.adoc#field-type-properties[Field Type Properties] must be reindexed in order for the change to be reflected in _all_ documents.
 
 [CAUTION]
 ====
@@ -69,7 +69,7 @@ Negative impacts on the user may not be immediately apparent.
 
 ==== Changing Field Analysis
 
-Beyond specific field-level properties, <<analyzers.adoc#,analysis chains>> are also configured on field types, and are applied at index and query time.
+Beyond specific field-level properties, xref:analyzers.adoc[analysis chains] are also configured on field types, and are applied at index and query time.
 
 If separate analysis chains are defined for query and indexing events for a field and you change _only_ the query-time analysis chain, reindexing is not necessary.
 
@@ -80,11 +80,11 @@ Identifying changes to solrconfig.xml that alter how data is ingested and thus r
 The general rule is "anything that changes what gets stored in the index requires reindexing".
 Here are several known examples.
 
-The parameter `luceneMatchVersion` in solrconfig.xml controls the compatibility of Solr with Lucene.
+The parameter `luceneMatchVersion` in `solrconfig.xml` controls the compatibility of Solr with Lucene.
 Since this parameter can change the rules for analysis behind the scenes, it's always recommended to reindex when changing it.
 Usually this is only changed in conjunction with a major upgrade.
 
-If you make a change to Solr's <<update-request-processors.adoc#,Update Request Processors>>, it's generally because you want to change something about how _update requests_ (documents) are _processed_ (indexed).
+If you make a change to Solr's xref:configuration-guide:update-request-processors.adoc[], it's generally because you want to change something about how _update requests_ (documents) are _processed_ (indexed).
 In this case, we recommend that you reindex your documents to implement the changes you've made just as if you had changed the schema.
 
 Similarly, if you change the `codecFactory` parameter in `solrconfig.xml`, it is again strongly recommended that you
@@ -139,7 +139,7 @@ It's important to verify that *all* documents have been deleted, as that ensures
 deleted as well.
 
 To verify that there are no segments in your index, look in the data/index directory and confirm it has no segments files.
-Since the data directory can be customized, see the section <<index-location-format.adoc#specifying-a-location-for-index-data-with-the-datadir-parameter,Specifying a Location for Index Data with the dataDir Parameter>> for the location of your index files.
+Since the data directory can be customized, see the section xref:configuration-guide:index-location-format.adoc#specifying-a-location-for-index-data-with-the-datadir-parameter[Specifying a Location for Index Data with the dataDir Parameter] for the location of your index files.
 
 Note you will need to verify the indexes have been removed in every shard and every replica on every node of a cluster.
 It is not sufficient to only query for the number of documents because you may have no documents but still have index
 A variation on this approach is to delete and recreate your collection using the
 
 === Index to Another Collection
 
+Another approach is to index into a new collection and use Solr's xref:deployment-guide:alias-management.adoc#createalias[collection alias] feature to seamlessly point the application to a new collection without downtime.
+Another approach is to use index to a new collection and use Solr's xref:deployment-guide:alias-management.adoc#createalias[collection alias] feature to seamlessly point the application to a new collection without downtime.
 
 This option is only available for Solr installations running in SolrCloud mode.
 
 With this approach, you will index your documents into a new collection that uses your changes and, once indexing and testing are complete, create an alias that points your front-end at the new collection.
 From that point, new queries and updates will be routed to the new collection seamlessly.
 
-Once the alias is in place and you are satisfied you no longer need the old data, you can delete the old collection with the Collections API <<collection-management.adoc#delete,DELETE command>>.
+Once the alias is in place and you are satisfied you no longer need the old data, you can delete the old collection with the Collections API xref:deployment-guide:collection-management.adoc#delete[DELETE command].
 
 [NOTE]
 One advantage of this option is that you can switch back to the old collection if you discover problems your testing did not uncover.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-api.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-api.adoc
index b73f082..c58ec64 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-api.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-api.adoc
@@ -24,7 +24,7 @@ Fields, dynamic fields, field types and copyField rules may be added, removed or
 Future Solr releases will extend write access to allow more schema elements to be modified.
 
 The Schema API utilizes the `ManagedIndexSchemaFactory` class, which is the default schema factory in modern Solr versions.
-See the section <<schema-factory.adoc#,Schema Factory Definition in SolrConfig>> for more information about choosing a schema factory for your index.
+See the section xref:configuration-guide:schema-factory.adoc[] for more information about choosing a schema factory for your index.
 
 .Hand editing of the managed schema is discouraged
 [NOTE]
@@ -55,7 +55,7 @@ You must reindex documents in order to apply schema changes to them.
 Queries and updates made after the change may encounter errors that were not present before the change.
 Completely deleting the index and rebuilding it is usually the only option to fix such errors.
 
-See the section <<reindexing.adoc#,Reindexing>> for more information about reindexing.
+See the section xref:reindexing.adoc[] for more information about reindexing.
 ====
 
 All of the examples in this section assume you are running the "techproducts" Solr example:
@@ -96,7 +96,7 @@ The `add-field` command adds a new field definition to your schema.
 If a field with the same name exists an error is thrown.
 
 All of the properties available when defining a field with manual schema edits can be passed via the API.
-These request attributes are described in detail in the section <<fields.adoc#,Fields>>.
+These request attributes are described in detail in the section xref:fields.adoc[].
 
 For example, to define a new stored field named "sell_by", of type "pdate", you would POST the following request:
 
@@ -170,7 +170,7 @@ Note that you must supply the full definition for a field - this command will *n
 If the field does not exist in the schema an error is thrown.
 
 All of the properties available when defining a field with manual schema edits can be passed via the API.
-These request attributes are described in detail in the section <<fields.adoc#,Fields>>.
+These request attributes are described in detail in the section xref:fields.adoc[].
 
 For example, to replace the definition of an existing field "sell_by", to make it be of type "date" and to not be stored, you would POST the following request:
 
@@ -210,7 +210,7 @@ curl -X POST -H 'Content-type:application/json' --data-binary '{
 The `add-dynamic-field` command adds a new dynamic field rule to your schema.
 
 All of the properties available when editing the schema can be passed with the POST request.
-The section <<dynamic-fields.adoc#,Dynamic Fields>> has details on all of the attributes that can be defined for a dynamic field rule.
+The section xref:dynamic-fields.adoc[] has details on all of the attributes that can be defined for a dynamic field rule.
 
 For example, to create a new dynamic field rule where all incoming fields ending with "_s" would be stored and have field type "string", you can POST a request like this:
 
@@ -284,7 +284,7 @@ Note that you must supply the full definition for a dynamic field rule - this co
 If the dynamic field rule does not exist in the schema an error is thrown.
 
 All of the properties available when editing the schema can be passed with the POST request.
-The section <<dynamic-fields.adoc#,Dynamic Fields>> has details on all of the attributes that can be defined for a dynamic field rule.
+The section xref:dynamic-fields.adoc[] has details on all of the attributes that can be defined for a dynamic field rule.
 
 For example, to replace the definition of the "*_s" dynamic field rule with one where the field type is "text_general" and it's not stored, you can POST a request like this:
 
@@ -325,7 +325,7 @@ The `add-field-type` command adds a new field type to your schema.
 
 All of the field type properties available when editing the schema by hand are available for use in a POST request.
 The structure of the command is a JSON mapping of the standard field type definition, including the name, class, index and query analyzer definitions, etc.
-Details of all of the available options are described in the section <<field-types.adoc#,Field Types>>.
+Details of all of the available options are described in the section xref:field-type-definitions-and-properties.adoc[].
 
 For example, to create a new field type named "myNewTxtField", you can POST a request as follows:
 
@@ -444,7 +444,7 @@ If the field type does not exist in the schema an error is thrown.
 
 All of the field type properties available when editing the schema by hand are available for use in a POST request.
 The structure of the command is a JSON mapping of the standard field type definition, including the name, class, index and query analyzer definitions, etc.
-Details of all of the available options are described in the section <<field-types.adoc#,Field Types>>.
+Details of all of the available options are described in the section xref:field-type-definitions-and-properties.adoc[].
 
 For example, to replace the definition of a field type named "myNewTxtField", you can make a POST request as follows:
 
@@ -517,7 +517,7 @@ A field or an array of fields to which the source field will be copied.
 |===
 +
 The upper limit for the number of characters to be copied.
-The section <<copy-fields.adoc#,Copy Fields>> has more details.
+The section xref:copy-fields.adoc[] has more details.
 
 For example, to define a rule to copy the field "shelf" to the "location" and "catchall" fields, you would POST the following request:
 
@@ -944,7 +944,7 @@ The output will include each field and any defined configuration for each field.
 The defined configuration can vary for each field, but will minimally include the field `name`, the `type`, if it is `indexed` and if it is `stored`.
 
 If `multiValued` is defined as either true or false (most likely true), that will also be shown.
-See the section <<fields.adoc#,Fields>> for more information about each parameter.
+See the section xref:fields.adoc[] for more information about each parameter.
 
 ==== List Fields Examples
 
@@ -1060,7 +1060,7 @@ If `false`, only explicitly specified field properties will be included.
 
 The output will include each dynamic field rule and the defined configuration for each rule.
 The defined configuration can vary for each rule, but will minimally include the dynamic field `name`, the `type`, if it is `indexed` and if it is `stored`.
-See the section <<dynamic-fields.adoc#,Dynamic Fields>> for more information about each parameter.
+See the section xref:dynamic-fields.adoc[] for more information about each parameter.
 
 ==== List Dynamic Field Examples
 
@@ -1189,7 +1189,7 @@ If `false`, only explicitly specified field properties will be included.
 The output will include each field type and any defined configuration for the type.
 The defined configuration can vary for each type, but will minimally include the field type `name` and the `class`.
 If query or index analyzers, tokenizers, or filters are defined, those will also be shown with other defined parameters.
-See the section <<field-types.adoc#,Field Types>> for more information about how to configure various types of fields.
+See the section xref:field-type-definitions-and-properties.adoc[] for more information about how to configure various types of fields.
 
 ==== List Field Type Examples
 
@@ -1312,7 +1312,7 @@ If not specified, all copyField-s will be included in the response.
 ==== List Copy Field Response
 
 The output will include the `source` and `dest` (destination) of each copy field rule defined in `schema.xml`.
-For more information about copy fields, see the section <<copy-fields.adoc#,Copy Fields>>.
+For more information about copy fields, see the section xref:copy-fields.adoc[].
 
 ==== List Copy Field Examples
 
@@ -1649,6 +1649,5 @@ curl -X GET "http://localhost:8983/api/collections/techproducts/schema/similarit
 
 == Manage Resource Data
 
-The <<managed-resources.adoc#,Managed Resources>> REST API provides a mechanism for any Solr plugin to expose resources that should support CRUD (Create, Read, Update, Delete) operations.
+The xref:configuration-guide:managed-resources.adoc[] REST API provides a mechanism for any Solr plugin to expose resources that should support CRUD (Create, Read, Update, Delete) operations.
 Depending on which field types and analyzers are configured in your Schema, additional `/schema/` REST API paths may exist.
-See the <<managed-resources.adoc#,Managed Resources>> section for more information and examples.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-browser-screen.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-browser-screen.adoc
index 723af74..1502eb1 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-browser-screen.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-browser-screen.adoc
@@ -24,7 +24,7 @@ If there is nothing chosen, use the pull-down menu to choose the field or field
 .Schema Browser Screen
 image::schema-browser-screen/schema_browser_terms.png[image,height=400]
 
-The screen provides a great deal of useful information about each particular field and fieldtype in the Schema, and provides a quick UI for adding fields or fieldtypes using the <<schema-api.adoc#,Schema API>> (if enabled).
+The screen provides a great deal of useful information about each particular field and fieldtype in the Schema, and provides a quick UI for adding fields or fieldtypes using the xref:schema-api.adoc[] (if enabled).
 In the example above, we have chosen the `cat` field.
 On the left side of the main view window, we see the field name, that it is copied to the `\_text_` (because of a copyField rule) and that it uses the `strings` fieldtype.
 Click on one of those field or fieldtype names, and you can see the corresponding definitions.
@@ -32,18 +32,18 @@ Click on one of those field or fieldtype names, and you can see the correspondin
 In the right part of the main view, we see the specific properties of how the `cat` field is defined – either explicitly or implicitly via its fieldtype, as well as how many documents have populated this field.
 Then we see the analyzer used for indexing and query processing.
 Click the icon to the left of either of those, and you'll see the definitions for the tokenizers and/or filters that are used.
-The output of these processes is the information you see when testing how content is handled for a particular field with the <<analysis-screen.adoc#,Analysis Screen>>.
+The output of these processes is the information you see when testing how content is handled for a particular field with the xref:analysis-screen.adoc[].
 
 Under the analyzer information is a button to *Load Term Info*.
 Clicking that button will show the top _N_ terms that are in a sample shard for that field, as well as a histogram showing the number of terms with various frequencies.
-Click on a term, and you will be taken to the <<query-screen.adoc#,Query Screen>> to see the results of a query of that term in that field.
+Click on a term, and you will be taken to the xref:query-guide:query-screen.adoc[] to see the results of a query of that term in that field.
 If you want to always see the term information for a field, choose *Autoload* and it will always appear when there are terms for a field.
 A histogram shows the number of terms with a given frequency in the field.
 
 [IMPORTANT]
 ====
 Term Information is loaded from single arbitrarily selected core from the collection, to provide a representative sample for the collection.
-Full <<faceting.adoc#,Field Facet>> query results are needed to see precise term counts across the entire collection.
+Full xref:query-guide:faceting.adoc[Field Facet] query results are needed to see precise term counts across the entire collection.
 ====
 
-For programmatic access to the underlying information in this screen please reference the <<luke-request-handler.adoc#,Luke Request Handler>>
+For programmatic access to the underlying information in this screen please reference the xref:luke-request-handler.adoc[].
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-designer.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-designer.adoc
index 13576fa..06c98f3 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-designer.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-designer.adoc
@@ -20,7 +20,7 @@
 The Schema Designer screen lets you interactively design a new schema using sample data.
 
 .Schema Designer screen
-image::solr-admin-ui/schema-designer.png[image]
+image::getting-started:solr-admin-ui/schema-designer.png[image]
 
 There are a number of panels on the Schema Designer screen to provide immediate feedback when you make changes to the schema, including:
 
@@ -35,10 +35,10 @@ You can safely experiment with changes and see the impact on query results immed
 Once data is indexed using a published schema, there are severe restrictions on the type of changes you can make to the schema without needing a full re-index.
 When designing a new schema, the Schema Designer re-indexes your sample data automatically when you make changes. However, the designer does not re-index data in collections using a published schema.
 
-.Security Requirements
+.Authorization Requirements
 [NOTE]
 ====
-If the <<rule-based-authorization-plugin.adoc#,Rule-based Authorization Plugin>> is enabled for your Solr installation, then users need to have the `config-edit` and `config-read` permissions to use the Schema Designer.
+If the xref:deployment-guide:rule-based-authorization-plugin.adoc[] is enabled for your Solr installation, then users need to have the `config-edit` and `config-read` permissions to use the Schema Designer.
 ====
 
 == Getting Started
@@ -63,7 +63,7 @@ Click on the btn:[Analyze Documents] button to submit the sample documents to th
 
 === Temporary Configset and Collection
 
-Behind the scenes, the Schema Designer API creates a temporary <<config-sets.adoc#,Configset>> (schema + solrconfig.xml + supporting files) in Zookeeper.
+Behind the scenes, the Schema Designer API creates a temporary xref:configuration-guide:config-sets.adoc[] (schema + `solrconfig.xml` + supporting files) in ZooKeeper.
 In addition, the Schema Designer API creates a temporary collection with a single shard and replica to hold sample documents.
 These temporary resources are persisted to disk and exist until the schema is published or manually deleted using the Schema Designer API cleanup endpoint (`/api/schema-designer/cleanup`).
 
@@ -116,14 +116,14 @@ After sending the sample documents to the Schema Designer backend, you can open
 [NOTE]
 ====
 The Schema Designer API is primarily intended to support an interactive experience in the UI vs. being used programmatically by developers.
-To create and manage Configsets and Schemas programmatically, see the <<configsets-api.adoc#,Configset>> and <<schema-api.adoc#,Schema>> APIs.
+To create and manage Configsets and Schemas programmatically, see the sections xref:configuration-guide:configsets-api.adoc[] and xref:schema-api.adoc[].
 ====
 
 == Schema Editor
 
 After analyzing your sample documents, the Schema Designer loads the schema in the *Schema Editor* in the middle panel.
 The editor renders the schema as a tree component composed of Fields, Dynamic Fields, Field Types, and Files.
-For more information about schema objects, see <<fields-and-schema-design.adoc#,Fields and Schema Design>>.
+For more information about schema objects, see xref:fields.adoc[].
 
 image::schema-designer/schema-editor-root.png[image,width=700]
 
@@ -137,10 +137,10 @@ Consequently, the Schema Designer focuses primarily on the schema aspects of a C
 
 When you click on the root node of the Schema Editor tree, you can refine top-level schema properties, including:
 
-* Languages: The `_default` schema includes text fields for a number of common languages. You can include all text analyzers in your schema or select a subset based on the languages your search application needs to support. The designer will remove all the unnecessary field types for languages you don't need. For more information about text analysis and languages, see: <<language-analysis.adoc#,Language Analysis>>
-* Dynamic fields allow Solr to index fields that you did not explicitly define in your schema. Dynamic fields can make your application less brittle by providing some flexibility in the documents you can add to Solr. It is recommended to keep the default set of dynamic fields enabled for your schema. Unchecking this option removes all dynamic fields from your schema. For more information about dynamic fields, see: <<dynamic-fields.adoc#,Dynamic Fields>>
-* Field guessing (aka "schemaless mode") allows Solr to detect the "best" field type for unknown fields encountered during indexing. Field guessing also performs some field transformations, such as removing spaces from field names. If you use the schema designer to create your schema based on sample documents, you may not need to enable this feature. However, with this feature disabled, you need to make sure the incoming data matches the schema exactly or indexing errors may occur. For m [...]
-* Enabling this feature adds the `_root_` and `_nest_path_` fields to your schema. For more information about indexing nested child documents, see: <<indexing-nested-documents.adoc#,Indexing Nested Documents>>
+* Languages: The `_default` schema includes text fields for a number of common languages. You can include all text analyzers in your schema or select a subset based on the languages your search application needs to support. The designer will remove all the unnecessary field types for languages you don't need. For more information about text analysis and languages, see xref:language-analysis.adoc[].
+* Dynamic fields allow Solr to index fields that you did not explicitly define in your schema. Dynamic fields can make your application less brittle by providing some flexibility in the documents you can add to Solr. It is recommended to keep the default set of dynamic fields enabled for your schema. Unchecking this option removes all dynamic fields from your schema. For more information about dynamic fields, see xref:dynamic-fields.adoc[].
+* Field guessing (aka "schemaless mode") allows Solr to detect the "best" field type for unknown fields encountered during indexing. Field guessing also performs some field transformations, such as removing spaces from field names. If you use the schema designer to create your schema based on sample documents, you may not need to enable this feature. However, with this feature disabled, you need to make sure the incoming data matches the schema exactly or indexing errors may occur. For m [...]
+* Enabling this feature adds the `_root_` and `_nest_path_` fields to your schema. For more information about indexing nested child documents, see xref:indexing-nested-documents.adoc[].
 
 Only make changes to these top-level schema properties when you fully understand how they impact the behavior of your search application.
 When first starting out, you can leave the default settings and focus your attention on the fields and field types in the schema.
@@ -148,7 +148,7 @@ When first starting out, you can leave the default settings and focus your atten
 === Schema Fields
 
 Click on the *Fields* node in the editor tree to see an overview of the fields in your schema,
-along with the <<field-type-definitions-and-properties.adoc#,properties>> that govern how the field will be indexed by Solr.
+along with the xref:field-type-definitions-and-properties.adoc[properties] that govern how the field will be indexed by Solr.
 
 image::schema-designer/schema-editor-fields.png[image,width=750]
 
@@ -190,13 +190,14 @@ When you select a text-based field in the tree, the *Text Analysis* panel shows
 
 image::schema-designer/text-analysis.png[image,width=600]
 
-If you need to change the text analysis strategy for a field, you need to edit the Field Type. For more information about text analysis, see: <<analyzers.adoc#,Analyzers>>.
+If you need to change the text analysis strategy for a field, you need to edit the Field Type. For more information about text analysis, see xref:analyzers.adoc[].
 
 == Query Tester
 
 The *Query Tester* panel lets you experiment with queries executed against your sample document set using the current schema.
+
 Using the Query Tester, you can see how changes to the schema impact the behavior of queries, such as matching, sorting, faceting, and highlighting.
-The Query Tester form is not intended to demonstrate all possible <<query-guide.adoc#,query features>> available in Solr.
+The Query Tester form is not intended to demonstrate all possible xref:query-guide.adoc[query features] available in Solr.
 
 image::schema-designer/query-tester.png[image]
 
@@ -233,4 +234,4 @@ and does not prevent someone from changing the schema using the Schema API direc
 Once the publish action completes, the temporary Configset and collection are deleted and the Schema Designer UI resets back to a fresh state.
 
 Alternatively, instead of publishing to Zookeeper, you can also download the Configset to a zip file containing the schema, solrconfig.xml, and supporting files.
-The zip file can be uploaded to other Solr instances using the <<configsets-api.adoc#,Configset API>> or saved in version control.
+The zip file can be uploaded to other Solr instances using the xref:configuration-guide:configsets-api.adoc[] or saved in version control.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-elements.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-elements.adoc
index 6e19834..abb1b74 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-elements.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/schema-elements.adoc
@@ -21,12 +21,12 @@ Solr stores details about the field types and fields it is expected to understan
 == Solr's Schema File
 The name and location of Solr's schema file may vary depending on how you initially configured Solr or if you modified it later.
 
-* `managed-schema.xml` is the name for the schema file Solr uses by default to support making schema changes at runtime via the <<schema-api.adoc#,Schema API>>, or <<schemaless-mode.adoc#,Schemaless Mode>> features.
+* `managed-schema.xml` is the name for the schema file Solr uses by default to support making schema changes at runtime via the xref:schema-api.adoc[], or xref:schemaless-mode.adoc[] features.
 +
-You may <<schema-factory.adoc#,explicitly configure the managed schema features>> to use an alternative filename if you choose, but the contents of the files are still updated automatically by Solr.
-* `schema.xml` is the traditional name for a schema file which can be edited manually by users who use the <<schema-factory.adoc#,`ClassicIndexSchemaFactory`>>.
+You may xref:configuration-guide:schema-factory.adoc[explicitly configure] the managed schema features to use an alternative filename if you choose, but the contents of the files are still updated automatically by Solr.
+* `schema.xml` is the traditional name for a schema file which can be edited manually by users who use the xref:configuration-guide:schema-factory.adoc#classicindexschemafactory[`ClassicIndexSchemaFactory`].
 * If you are using SolrCloud you may not be able to find any file by these names on the local filesystem.
-You will only be able to see the schema through the Schema API (if enabled) or through the Solr Admin UI's <<cloud-screens.adoc#,Cloud Screens>>.
+You will only be able to see the schema through the Schema API (if enabled) or through the Solr Admin UI's xref:deployment-guide:cloud-screens.adoc[].
 
 Whichever name of the file in use in your installation, the structure of the file is not changed.
 However, the way you interact with the file will change.
@@ -34,7 +34,7 @@ If you are using the managed schema, it is expected that you only interact with
 If you do not use the managed schema, you will only be able to make manual edits to the file, the Schema API will not support any modifications.
 
 Note that if you are not using the Schema API yet you do use SolrCloud, you will need to interact with the schema file through ZooKeeper using `upconfig` and `downconfig` commands to make a local copy and upload your changes.
-The options for doing this are described in <<solr-control-script-reference.adoc#,Solr Control Script Reference>> and <<zookeeper-file-management.adoc#,ZooKeeper File Management>>.
+The options for doing this are described in xref:deployment-guide:solr-control-script-reference.adoc[] and xref:deployment-guide:zookeeper-file-management.adoc[].
 
 == Structure of the Schema File
 
@@ -55,13 +55,13 @@ This example is not real XML, but shows the primary elements that make up a sche
 ----
 
 The most commonly defined elements are `types` and `fields`, where the field types and the actual fields are configured.
-The sections <<field-types.adoc#,Field Types>>, and <<fields.adoc#,Fields>> describe how to configure these for your schema.
+The sections xref:field-type-definitions-and-properties.adoc[], and xref:fields.adoc[] describe how to configure these for your schema.
 
-These are supplemented by `copyFields`, described in <<copy-fields.adoc#,Copy Fields>>, and `dynamicFields`, described in <<dynamic-fields.adoc#,Dynamic Fields>>.
+These are supplemented by `copyFields`, described in xref:copy-fields.adoc[], and `dynamicFields`, described in xref:dynamic-fields.adoc[].
 
 The `uniqueKey` described in <<Unique Key>> below must always be defined.
 
-A default `similarity` will be used, but can be modifed as described in the section <<Similarity>> below.
+A default `similarity` will be used, but can be modified as described in the section <<Similarity>> below.
 
 .Types and fields are optional tags
 [NOTE]
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/schemaless-mode.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/schemaless-mode.adoc
index 311b677..5e0884e 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/schemaless-mode.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/schemaless-mode.adoc
@@ -21,13 +21,13 @@ Schemaless Mode is a set of Solr features that, when used together, allow users
 These Solr features, all controlled via `solrconfig.xml`, are:
 
 . Managed schema: Schema modifications are made at runtime through Solr APIs, which requires the use of a `schemaFactory` that supports these changes.
-See the section <<schema-factory.adoc#,Schema Factory Definition in SolrConfig>> for more details.
+See the section xref:configuration-guide:schema-factory.adoc[] for more details.
 . Field value class guessing: Previously unseen fields are run through a cascading set of value-based parsers, which guess the Java class of field values - parsers for Boolean, Integer, Long, Float, Double, and Date are currently available.
-. Automatic schema field addition, based on field value class(es): Previously unseen fields are added to the schema, based on field value Java classes, which are mapped to schema field types - see <<field-types.adoc#,Field Types>>.
+. Automatic schema field addition, based on field value class(es): Previously unseen fields are added to the schema, based on field value Java classes, which are mapped to schema field types - see xref:field-type-definitions-and-properties.adoc[].
 
 == Using the Schemaless Example
 
-The three features of schemaless mode are pre-configured in the `_default` <<config-sets.adoc#,configset>> in the Solr distribution.
+The three features of schemaless mode are pre-configured in the `_default` xref:configuration-guide:config-sets.adoc[configset] in the Solr distribution.
 To start an example instance of Solr using these configs, run the following command:
 
 [source,bash]
@@ -37,7 +37,7 @@ bin/solr start -e schemaless
 
 This will launch a single Solr server, and automatically create a collection (named "```gettingstarted```") that contains only three fields in the initial schema: `id`, `\_version_`, and `\_text_`.
 
-You can use the `/schema/fields` <<schema-api.adoc#,Schema API>> to confirm this: `curl \http://localhost:8983/solr/gettingstarted/schema/fields` will output:
+You can use the `/schema/fields` xref:schema-api.adoc[] to confirm this: `curl \http://localhost:8983/solr/gettingstarted/schema/fields` will output:
 
 [source,json]
 ----
@@ -74,9 +74,9 @@ If, however, you would like to implement schemaless on your own, you should make
 
 === Enable Managed Schema
 
-As described in the section <<schema-factory.adoc#,Schema Factory Definition in SolrConfig>>, Managed Schema support is enabled by default, unless your configuration specifies that `ClassicIndexSchemaFactory` should be used.
+As described in the section xref:configuration-guide:schema-factory.adoc[], Managed Schema support is enabled by default, unless your configuration specifies that `ClassicIndexSchemaFactory` should be used.
 
-You can configure the `ManagedIndexSchemaFactory` (and control the resource file used, or disable future modifications) by adding an explicit `<schemaFactory/>` like the one below, please see <<schema-factory.adoc#,Schema Factory Definition in SolrConfig>> for more details on the options available.
+You can configure the `ManagedIndexSchemaFactory` (and control the resource file used, or disable future modifications) by adding an explicit `<schemaFactory/>` like the one below, please see xref:configuration-guide:schema-factory.adoc[] for more details on the options available.
 
 [source,xml]
 ----
@@ -88,7 +88,7 @@ You can configure the `ManagedIndexSchemaFactory` (and control the resource file
 
 === Enable Field Class Guessing
 
-In Solr, an <<update-request-processors.adoc#,UpdateRequestProcessorChain>> defines a chain of plugins that are applied to documents before or while they are indexed.
+In Solr, an xref:configuration-guide:update-request-processors.adoc[UpdateRequestProcessorChain] defines a chain of plugins that are applied to documents before or while they are indexed.
 
 The field guessing aspect of Solr's schemaless mode uses a specially-defined UpdateRequestProcessorChain that allows Solr to guess field types.
 You can also define the default field type classes to use.
@@ -203,7 +203,7 @@ Once the UpdateRequestProcessorChain has been defined, you must instruct your Up
 There are two ways to do this.
 The update chain shown above has a `default=true` attribute which will use it for any update handler.
 
-An alternative, more explicit way is to use <<initparams.adoc#,InitParams>> to set the defaults on all `/update` request handlers:
+An alternative, more explicit way is to use xref:configuration-guide:initparams.adoc[] to set the defaults on all `/update` request handlers:
 
 [source,xml]
 ----
@@ -219,7 +219,7 @@ IMPORTANT: After all of these changes have been made, Solr should be restarted o
 === Disabling Automatic Field Guessing
 
 Automatic field creation can be disabled with the `update.autoCreateFields` property.
-To do this, you can use <<solr-control-script-reference.adoc#set-or-unset-configuration-properties,`bin/solr config`>> with a command such as:
+To do this, you can use xref:deployment-guide:solr-control-script-reference.adoc#set-or-unset-configuration-properties[`bin/solr config`] with a command such as:
 
 [source,bash]
 bin/solr config -c mycollection -p 8983 -action set-user-property -property update.autoCreateFields -value false
@@ -304,9 +304,9 @@ In addition string versions of the text fields are indexed, using copyFields to
 .You Can Still Be Explicit
 [TIP]
 ====
-Even if you want to use schemaless mode for most fields, you can still use the <<schema-api.adoc#,Schema API>> to pre-emptively create some fields, with explicit types, before you index documents that use them.
+Even if you want to use schemaless mode for most fields, you can still use the xref:schema-api.adoc[] to pre-emptively create some fields, with explicit types, before you index documents that use them.
 
-Internally, the Schema API and the Schemaless Update Processors both use the same <<schema-factory.adoc#,Managed Schema>> functionality.
+Internally, the Schema API and the Schemaless Update Processors both use the same xref:configuration-guide:schema-factory.adoc[Managed Schema] functionality.
 
 Also, if you do not need the `*_str` version of a text field, you can simply remove the `copyField` definition from the auto-generated schema and it will not be re-added since the original field is now defined.
 ====
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/tokenizers.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/tokenizers.adoc
index 12e571f..34a4275 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/tokenizers.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/tokenizers.adoc
@@ -31,7 +31,7 @@ It's also possible for more than one token to have the same position or refer to
 Keep this in mind if you use token metadata for things like highlighting search results in the field text.
 
 == About Tokenizers
-You configure the tokenizer for a text field type in the <<solr-schema.adoc#,schema>> with a `<tokenizer>` element, as a child of `<analyzer>`:
+You configure the tokenizer for a text field type in the xref:schema-elements.adoc[schema] with a `<tokenizer>` element, as a child of `<analyzer>`:
 
 [.dynamic-tabs]
 --
@@ -549,7 +549,7 @@ The default configuration for `solr.ICUTokenizerFactory` provides UAX#29 word br
 [IMPORTANT]
 ====
 
-To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<solr-plugins.adoc#installing-plugins,Solr Plugins>>).
+To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins]).
 See the `solr/contrib/analysis-extras/README.md` for information on which jars you need to add.
 
 ====
@@ -913,4 +913,4 @@ Valid values:
 
 == OpenNLP Tokenizer and OpenNLP Filters
 
-See <<language-analysis.adoc#opennlp-integration,OpenNLP Integration>> for information about using the OpenNLP Tokenizer, along with information about available OpenNLP token filters.
+See xref:language-analysis.adoc#opennlp-integration[OpenNLP Integration] for information about using the OpenNLP Tokenizer, along with information about available OpenNLP token filters.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc
index e1a2065..621874d 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc
@@ -61,7 +61,7 @@ Wildcards can be used here, see <<Using Wildcards for Field Names>> below for mo
 |Optional |Default: `false`
 |===
 +
-This parameter is particularly convenient when the fields in the input JSON are not available in the schema and <<schemaless-mode.adoc#,schemaless mode>> is not enabled.
+This parameter is particularly convenient when the fields in the input JSON are not available in the schema and xref:schemaless-mode.adoc[schemaless mode] is not enabled.
 This will index all the fields into the default search field (using the `df` parameter) and only the `uniqueKey` field is mapped to the corresponding field in the schema.
 If the input JSON does not have a value for the `uniqueKey` field then a UUID is generated for the same.
 
@@ -332,12 +332,12 @@ Solr will automatically attempt to add the content of the field from the JSON in
 ====
 Documents will be rejected during indexing if the fields do not exist in the schema before indexing.
 So, if you are NOT using schemaless mode, you must pre-create all fields.
-If you are working in <<schemaless-mode.adoc#,Schemaless Mode>>, however, fields that don't exist will be created on the fly with Solr's best guess for the field type.
+If you are working in xref:configuration-guide:schemaless-mode.adoc[], however, fields that don't exist will be created on the fly with Solr's best guess for the field type.
 ====
 
 === Reusing Parameters in Multiple Requests
 
-You can store and re-use parameters with Solr's <<request-parameters-api.adoc#,Request Parameters API>>.
+You can store and re-use parameters with Solr's xref:configuration-guide:request-parameters-api.adoc[].
 
 Say we wanted to define parameters to split documents at the `exams` field, and map several other fields.
 We could make an API request such as:
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/field-types.adoc b/solr/solr-ref-guide/src/old-pages/field-types.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/indexing-guide/pages/field-types.adoc
rename to solr/solr-ref-guide/src/old-pages/field-types.adoc
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/fields-and-schema-design.adoc b/solr/solr-ref-guide/src/old-pages/fields-and-schema-design.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/indexing-guide/pages/fields-and-schema-design.adoc
rename to solr/solr-ref-guide/src/old-pages/fields-and-schema-design.adoc
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/indexing-data-operations.adoc b/solr/solr-ref-guide/src/old-pages/indexing-data-operations.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/indexing-guide/pages/indexing-data-operations.adoc
rename to solr/solr-ref-guide/src/old-pages/indexing-data-operations.adoc
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/schema-indexing-guide.adoc b/solr/solr-ref-guide/src/old-pages/schema-indexing-guide.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/indexing-guide/pages/schema-indexing-guide.adoc
rename to solr/solr-ref-guide/src/old-pages/schema-indexing-guide.adoc
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/solr-schema.adoc b/solr/solr-ref-guide/src/old-pages/solr-schema.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/indexing-guide/pages/solr-schema.adoc
rename to solr/solr-ref-guide/src/old-pages/solr-schema.adoc

[solr] 03/04: Add temporary hard-coded version values to fix build-time warnings

Posted by ct...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ctargett pushed a commit to branch jira/solr-15556-antora
in repository https://gitbox.apache.org/repos/asf/solr.git

commit f9faa1d51bc9c1383086f40e925f49958f67e8e7
Author: Cassandra Targett <ct...@apache.org>
AuthorDate: Mon Nov 29 14:51:22 2021 -0600

    Add temporary hard-coded version values to fix build-time warnings
---
 solr/solr-ref-guide/antora.yml | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/solr/solr-ref-guide/antora.yml b/solr/solr-ref-guide/antora.yml
index c4e088f..0c0af92 100644
--- a/solr/solr-ref-guide/antora.yml
+++ b/solr/solr-ref-guide/antora.yml
@@ -1,7 +1,7 @@
 name: solr
-# Note for future: We can assign the version based on parsing the branch name:
+# Assigns the version based on parsing the branch name:
 # 'branch_(*)_(*)': $1.$2' (or $1_$2)
-# This could also be on the content source in playbook.yml
+# This could also be put in the content source in playbook.yml
 version:
    '*/(*)': '$1'
 prerelease: true
@@ -16,10 +16,19 @@ nav:
 - modules/upgrade-notes/upgrade-nav.adoc
 asciidoc:
   attributes:
+    idseparator: '-'
+    idprefix: ''
     solr-javadocs: https://solr.apache.org/docs/9_0_0/
     lucene-javadocs: https://lucene.apache.org/core/9_0_0/
     solr-root-path: /Users/cass/solr-repos/solr-fork/solr/
+    java-javadocs: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/
+    # Hardcode some version attributes for now TODO
+    # This will make some constructed links be 404 since they aren't parsed
     solr-docs-version: '9.0'
-    idseparator: '-'
-    idprefix: ''
-    toc: ~
+    ivy-tika-version: '${ivyTika}'
+    ivy-opennlp-version: '1.9.1'
+    ivy-commons-codec-version: '1.13'
+    ivy-zookeeper-version: '3.7.0'
+    ivy-log4j-version: '2.14.1'
+    ivy-hadoop-version: '3.2.0'
+    ivy-dropwizard-version: '4.1.5'

[solr] 04/04: Fix refs in upgrade-notes + cleanups for earlier changes

Posted by ct...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ctargett pushed a commit to branch jira/solr-15556-antora
in repository https://gitbox.apache.org/repos/asf/solr.git

commit efc87d0ec5aa2c7d620d6c7d36ec08e837b69b27
Author: Cassandra Targett <ct...@apache.org>
AuthorDate: Mon Nov 29 14:52:02 2021 -0600

    Fix refs in upgrade-notes + cleanups for earlier changes
---
 solr/solr-ref-guide/modules/ROOT/pages/index.adoc  |   8 -
 .../pages/configuration-files.adoc                 |   2 +-
 .../getting-started/pages/introduction.adoc        |   2 +-
 .../transforming-and-indexing-custom-json.adoc     |   2 +-
 .../query-guide/pages/block-join-query-parser.adoc |  10 +-
 .../pages/collapse-and-expand-results.adoc         |   2 +-
 .../query-guide/pages/common-query-parameters.adoc |   2 +-
 .../query-guide/pages/dismax-query-parser.adoc     |   2 +-
 .../query-guide/pages/document-transformers.adoc   |   2 +-
 .../modules/query-guide/pages/faceting.adoc        |   4 +-
 .../query-guide/pages/function-queries.adoc        |   2 +-
 .../pages/json-faceting-domain-changes.adoc        |   2 +-
 .../query-guide/pages/math-expressions.adoc        |   3 +-
 .../pages/searching-nested-documents.adoc          |  13 +-
 .../modules/query-guide/pages/spell-checking.adoc  |   2 +-
 .../modules/query-guide/pages/sql-query.adoc       |   2 +-
 .../query-guide/pages/standard-query-parser.adoc   |   4 +-
 .../pages/stream-decorator-reference.adoc          |   2 +-
 .../pages/major-changes-in-solr-6.adoc             |  40 +++--
 .../pages/major-changes-in-solr-7.adoc             | 162 ++++++++++++++-------
 .../pages/major-changes-in-solr-8.adoc             |  59 ++++----
 .../pages/major-changes-in-solr-9.adoc             |   8 +-
 .../upgrade-notes/pages/solr-upgrade-notes.adoc    |  23 +--
 .../modules/upgrade-notes/upgrade-nav.adoc         |   1 -
 solr/solr-ref-guide/package-lock.json              |   6 +-
 25 files changed, 211 insertions(+), 154 deletions(-)

diff --git a/solr/solr-ref-guide/modules/ROOT/pages/index.adoc b/solr/solr-ref-guide/modules/ROOT/pages/index.adoc
index 8f9eb3f..33086df 100644
--- a/solr/solr-ref-guide/modules/ROOT/pages/index.adoc
+++ b/solr/solr-ref-guide/modules/ROOT/pages/index.adoc
@@ -1,13 +1,5 @@
 = Apache Solr Reference Guide
-:page-children: getting-started, \
-    deployment-guide, \
-    configuration-guide, \
-    schema-indexing-guide, \
-    query-guide, \
-    solr-upgrade-notes
-:page-notitle:
 :page-show-toc: false
-:page-layout: home
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
diff --git a/solr/solr-ref-guide/modules/configuration-guide/pages/configuration-files.adoc b/solr/solr-ref-guide/modules/configuration-guide/pages/configuration-files.adoc
index 45aeef3..9cb7be6 100644
--- a/solr/solr-ref-guide/modules/configuration-guide/pages/configuration-files.adoc
+++ b/solr/solr-ref-guide/modules/configuration-guide/pages/configuration-files.adoc
@@ -80,7 +80,7 @@ For more information on `solrconfig.xml`, see xref:configuring-solrconfig-xml.ad
 The schema defines a document as a collection of fields.
 You can define both the field types and the fields themselves.
 Field type definitions are powerful and include information about how Solr processes incoming field values and query values.
-For more information on Solr schemas, see xref:indexing-guide:solr-schema.adoc[].
+For more information on Solr schemas, see xref:indexing-guide:schema-elements.adoc[].
 ** `data/` contains index files.
 
 Note that the SolrCloud example does not include a `conf` directory for each Solr Core (so there is no `solrconfig.xml` or schema file).
diff --git a/solr/solr-ref-guide/modules/getting-started/pages/introduction.adoc b/solr/solr-ref-guide/modules/getting-started/pages/introduction.adoc
index 0700b7a..216679c 100644
--- a/solr/solr-ref-guide/modules/getting-started/pages/introduction.adoc
+++ b/solr/solr-ref-guide/modules/getting-started/pages/introduction.adoc
@@ -26,7 +26,7 @@ Any platform capable of HTTP can talk to Solr.
 See xref:deployment-guide:client-apis.adoc[] for details on client APIs.
 
 Flexible schema configurations allow nearly any type of data to be stored in Solr.
-The xref:indexing-guide:schema-indexing-guide.adoc[] has more details on these options.
+The xref:indexing-guide:schema-elements.adoc[] has more details on these options.
 
 Solr offers support for the simplest keyword searching through to complex queries on multiple fields and faceted search results.
 Collapsing and clustering results offer compelling features for e-commerce and storefronts.
diff --git a/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc b/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc
index 621874d..766ee74 100644
--- a/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc
+++ b/solr/solr-ref-guide/modules/indexing-guide/pages/transforming-and-indexing-custom-json.adoc
@@ -332,7 +332,7 @@ Solr will automatically attempt to add the content of the field from the JSON in
 ====
 Documents will be rejected during indexing if the fields do not exist in the schema before indexing.
 So, if you are NOT using schemaless mode, you must pre-create all fields.
-If you are working in xref:configuration-guide:schemaless-mode.adoc[], however, fields that don't exist will be created on the fly with Solr's best guess for the field type.
+If you are working in xref:schemaless-mode.adoc[], however, fields that don't exist will be created on the fly with Solr's best guess for the field type.
 ====
 
 === Reusing Parameters in Multiple Requests
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc
index 9cfbed4..b798ffe 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc
@@ -54,7 +54,7 @@ This parser wraps a query that matches some parent documents and returns the chi
 The syntax for this parser is: `q={!child of=<blockMask>}<someParents>`.
 
 * The inner subordinate query string (`someParents`) must be a query that will match some parent documents
-* The `of` parameter must be a query string to use as a <<#block-mask,Block Mask>> -- typically a query that matches the set of all possible parent documents
+* The `of` parameter must be a query string to use as a <<block-mask,Block Mask>> -- typically a query that matches the set of all possible parent documents
 
 The resulting query will match all documents which do _not_ match the `<blockMask>` query and are children (or descendents) of the documents matched by `<someParents>`.
 
@@ -74,7 +74,7 @@ We only get one document in response:
 
 [CAUTION]
 ====
-The query for `someParents` *MUST* match a strict subset of the documents matched by the <<#block-mask,Block Mask>> or your query may result in an Error:
+The query for `someParents` *MUST* match a strict subset of the documents matched by the <<block-mask,Block Mask>> or your query may result in an Error:
 
 [literal]
 Parent query must not match any docs besides parent filter.
@@ -114,7 +114,7 @@ This parser takes a query that matches child documents and returns their parents
 The syntax for this parser is similar to the `child` parser: `q={!parent which=<blockMask>}<someChildren>`.
 
 * The inner subordinate query string (`someChildren`) must be a query that will match some child documents
-* The `which` parameter must be a query string to use as a <<#block-mask,Block Mask>> -- typically a query that matches the set of all possible parent documents
+* The `which` parameter must be a query string to use as a <<block-mask,Block Mask>> -- typically a query that matches the set of all possible parent documents
 
 The resulting query will match all documents which _do_ match the `<blockMask>` query and are parents (or ancestors) of the documents matched by `<someChildren>`.
 
@@ -135,7 +135,7 @@ We get this document in response:
 
 [CAUTION]
 ====
-The query for `someChildren` *MUST NOT* match any documents matched by the <<#block-mask,Block Mask>> or your query may result in an Error:
+The query for `someChildren` *MUST NOT* match any documents matched by the <<block-mask,Block Mask>> or your query may result in an Error:
 
 [literal]
 Child query must not match same docs with parent filter.
@@ -210,4 +210,4 @@ A similar problematic situation can arise when mixing parent/child documents wit
 ...then our simple `doc_type:parent` Block Mask would no longer be adequate.
  We would instead need to use `\*:* -doc_type:child` or `doc_type:(simple parent)` to prevent our "simple" document from mistakenly being treated as a "child" of an adjacent "parent" document.
 
-The <<searching-nested-documents#searching-nested-documents,Searching Nested Documents>> section contains more detailed examples of specifing Block Mask queries with non trivial hierarchicies of documents.
+The xref:query-guide:searching-nested-documents.adoc[] section contains more detailed examples of specifying Block Mask queries with non trivial hierarchies of documents.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc b/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc
index bb55fd8..2d23b9d 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc
@@ -107,7 +107,7 @@ The `top_fc` hint is only available when collapsing on String fields.
 `top_fc` will also result in having the collapsed field cached in memory twice if it's used for faceting or sorting.
 For very high cardinality (high distinct count) fields, `top_fc` may not fare so well.
 +
-* `block`: This indicates that the field being collapsed on is suitable for the optimzed <<#block-collapsing,Block Collapse>> logic described below.
+* `block`: This indicates that the field being collapsed on is suitable for the optimized <<Block Collapsing>> logic described below.
 
 `size`::
 +
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc b/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc
index 1576a09..bbdd5c5 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc
@@ -18,7 +18,7 @@
 
 Several query parsers share supported query parameters.
 
-The following sections describe Solr's common query parameters, which are supported by the <<requesthandlers-searchcomponents#search-handlers,Search RequestHandlers>>.
+The following sections describe Solr's common query parameters, which are supported by the xref:configuration-guide:requesthandlers-searchcomponents.adoc#search-handlers[search request handlers].
 
 == defType Parameter
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc
index b0c03af..52d209b 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc
@@ -198,7 +198,7 @@ q=cheese
 bf=div(1,sum(1,price))^1.5
 ----
 
-Specifying functions with the bf parameter is essentially just shorthand for using the `bq` parameter (<<#bq-bf-shortcomings,with the same shortcomings>>) combined with the `{!func}` parser -- with the addition of the simplified "query boost" syntax.
+Specifying functions with the bf parameter is essentially just shorthand for using the `bq` parameter (<<bq-bf-shortcomings,with the same shortcomings>>) combined with the `{!func}` parser -- with the addition of the simplified "query boost" syntax.
 
 For example, the two `bf` parameters listed below, are completely equivalent to the two `bq` parameters below:
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc b/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc
index e4ab4b4..c1ddce4 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc
@@ -137,7 +137,7 @@ Note that this transformer can be used even when the query used to match the res
 q=book_title:Solr&fl=id,[child childFilter=doc_type:chapter limit=100]
 ----
 
-If the documents involved include a `\_nest_path_` field, then it is used to re-create the hierarchical structure of the descendent documents using the original pseudo-field names the documents were indexed with, otherwise the descendent documents are returned as a flat list of <<indexing-nested-documents#indexing-anonymous-children,anonymous children>>.
+If the documents involved include a `\_nest_path_` field, then it is used to re-create the hierarchical structure of the descendent documents using the original pseudo-field names the documents were indexed with, otherwise the descendent documents are returned as a flat list of xref:indexing-guide:indexing-nested-documents.adoc#indexing-anonymous-children[anonymous children].
 
 `childFilter`::
 +
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc b/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc
index fdfeac4..8c1cacf 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc
@@ -140,7 +140,7 @@ There are two options for this parameter.
 For terms in the ASCII range, this will be alphabetically sorted.
 +
 The default is `count` if `facet.limit` is greater than 0, otherwise, the default is `index`.
-Note that the default logic is changed when <<#limiting-facet-with-certain-terms>>
+Note that the default logic is changed when <<Limiting Facet with Certain Terms>>.
 
 `facet.limit`::
 +
@@ -423,7 +423,7 @@ filter::: This method generates the ranges based on other facet.range parameters
 It will make use of the filterCache, so it will benefit of a cache large enough to contain all ranges.
 +
 dv::: This method iterates the documents that match the main query, and for each of them finds the correct range for the value.
-This method will make use of xref:docvalues.adoc[] (if enabled for the field) or fieldCache.
+This method will make use of xref:indexing-guide:docvalues.adoc[] (if enabled for the field) or fieldCache.
 The `dv` method is not supported for field type DateRangeField or when using xref:result-grouping.adoc[group.facets].
 --
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc b/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc
index d679525..be91507 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc
@@ -285,7 +285,7 @@ Use the `field(myfield,min)` <<field Function,syntax for selecting the minimum v
 Returns milliseconds of difference between its arguments.
 Dates are relative to the Unix or POSIX time epoch, midnight, January 1, 1970 UTC.
 
-Arguments may be the name of a `DatePointField`, `TrieDateField`, or date math based on a xref:date-formatting-math.adoc[constant date or `NOW`].
+Arguments may be the name of a `DatePointField`, `TrieDateField`, or date math based on a xref:indexing-guide:date-formatting-math.adoc[constant date or `NOW`].
 
 * `ms()`: Equivalent to `ms(NOW)`, number of milliseconds since the epoch.
 * `ms(a):` Returns the number of milliseconds since the epoch that the argument represents.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc b/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc
index d7e5049..6390db0 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc
@@ -226,7 +226,7 @@ When a collection contains xref:indexing-guide:indexing-nested-documents.adoc[ne
 Both of these options work similarly to the corresponding xref:block-join-query-parser.adoc[] by taking in a single String query that exclusively matches all parent documents in the collection.
 If `blockParent` is used, then the resulting domain will contain all parent documents of the children from the original domain.
 If `blockChildren` is used, then the resulting domain will contain all child documents of the parents from the original domain.
-Quite often facets over child documents needs to be counted in parent documents, this can be done by `uniqueBlock(\_root_)` as described in <<json-facet-api#uniqueblock-and-block-join-counts, Block Join Facet Counts>>.
+Quite often facets over child documents need to be counted in parent documents; this can be done by `uniqueBlock(\_root_)` as described in xref:json-facet-api.adoc#uniqueblock-and-block-join-counts[Block Join Facet Counts].
 
 [source,json,subs="verbatim,callouts"]
 ----
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc b/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc
index 512ed1a..d3030c5 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc
@@ -1,5 +1,5 @@
 = Streaming Expressions and Math Expressions
-:toc!:
+:page-toclevels: 0
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -47,3 +47,4 @@ image::math-expressions/searchiris.png[]
 | *xref:graph.adoc[]*: Bipartite graphs, in-degree centrality, graph recommenders, temporal graphs and event correlation.
 | *xref:computational-geometry.adoc[]*: Convex Hulls and Enclosing Disks.
 | *xref:logs.adoc[]*: Solr log analytics and visualization.
+|===
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc b/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc
index 6650402..c3ae864 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc
@@ -1,4 +1,5 @@
 = Searching Nested Child Documents
+:page-partial:
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -23,13 +24,13 @@ Please refer to xref:indexing-guide:indexing-nested-documents.adoc[] for details
 
 [NOTE]
 This section does not demonstrate faceting on nested documents.
-For nested document faceting, please refer to the <<json-facet-api#uniqueblock-and-block-join-counts,Block Join Facet Counts>> section.
+For nested document faceting, please refer to the xref:json-facet-api.adoc#uniqueblock-and-block-join-counts[Block Join Facet Counts] section.
 
 == Query Examples
 
-For the upcoming examples, we'll assume an index containing the same documents covered in <<indexing-nested-documents#example-indexing-syntax,Indexing Nested Documents>>:
+For the upcoming examples, we'll assume an index containing the same documents covered in xref:indexing-guide:indexing-nested-documents.adoc#example-indexing-syntax[Indexing Nested Documents]:
 
-include::indexing-nested-documents.adoc[tag=sample-indexing-deeply-nested-documents]
+include::indexing-guide:page$indexing-nested-documents.adoc[tag=sample-indexing-deeply-nested-documents]
 
 === Child Doc Transformer
 
@@ -175,7 +176,7 @@ Note that in the above example, the `/` characters in the `\_nest_path_` were "d
 
 (You can see that only a single level of `\` escaping is needed in the body of the query string -- to prevent the Regex syntax --  because it's not a quoted string local param).
 
-You may find it more convenient to use <<local-params#parameter-dereferencing,parameter references>> in conjunction with <<other-parsers#other-parsers,other parsers>> that do not treat `/` as a special character to express the same query in a more verbose form:
+You may find it more convenient to use xref:local-params.adoc#parameter-dereferencing[parameter references] in conjunction with xref:other-parsers.adoc[other parsers] that do not treat `/` as a special character to express the same query in a more verbose form:
 
 [source,text]
 ----
@@ -236,7 +237,7 @@ $ curl 'http://localhost:8983/solr/gettingstarted/select' -d 'omitHeader=true' -
   }}
 ----
 
-In this example we've used `\*:* -\_nest_path_:*` as our <<block-join-query-parser#block-mask,`which` parameter>> to indicate we want to consider all documents which don't have a nest path -- i.e., all "root" level document -- as the set of possible parents.
+In this example we've used `\*:* -\_nest_path_:*` as our xref:block-join-query-parser.adoc#block-mask[`which` parameter] to indicate we want to consider all documents which don't have a nest path -- i.e., all "root" level document -- as the set of possible parents.
 
 By changing the `which` parameter to match ancestors at specific `\_nest_path_` levels, we can change the type of ancestors we return.
 In the query below, we search for `skus` (using an `which` parameter that identifies all documents that do _not_ have a `\_nest_path_` with the prefix `/skus/*`) that are the ancestors of `manuals` with exactly `1` page:
@@ -261,7 +262,7 @@ $ curl 'http://localhost:8983/solr/gettingstarted/select' -d 'omitHeader=true' -
 
 [CAUTION]
 ====
-Note that in the above example, the `/` characters in the `\_nest_path_` were "double escaped" in the `which` parameter, for the <<#double-escaping-nest-path-slashes,same reasons discussed above>> regarding the `{!child} pasers `of` parameter.
+Note that in the above example, the `/` characters in the `\_nest_path_` were "double escaped" in the `which` parameter, for the <<double-escaping-nest-path-slashes,same reasons discussed above>> regarding the `{!child}` parser's `of` parameter.
 ====
 
 === Combining Block Join Query Parsers with Child Doc Transformer
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc b/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc
index 40b4c63..84b0194 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc
@@ -173,7 +173,7 @@ The results are combined and collations can contain a mix of corrections from bo
 
 === Add It to a Request Handler
 
-Queries will be sent to a xref:configuration-guide:request-handlers-and-search-components.adoc[request handler].
+Queries will be sent to a xref:configuration-guide:requesthandlers-searchcomponents.adoc[request handler].
 If every request should generate a suggestion, then you would add the following to the `requestHandler` that you are using:
 
 [source,xml]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc b/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc
index 7a6ef1c..e6ec10f 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc
@@ -356,7 +356,7 @@ By default, the `/sql` request handler is configured as an implicit handler, mea
 
 ==== Authorization for SQL Requests
 
-If your Solr cluster is configured to use the xref:rule-based-authorization-plugin.adoc[],
+If your Solr cluster is configured to use the xref:deployment-guide:rule-based-authorization-plugin.adoc[],
 then you need to grant `GET` and `POST` permissions on the `/sql`, `/select`, and `/export` endpoints for all collections you intend to execute SQL queries against.
 The `/select` endpoint is used for `LIMIT` queries, whereas the `/export` handler is used for queries without a `LIMIT`, so in most cases, you'll want to grant access to both.
 If you're using a worker collection for the `/sql` handler, then you only need to grant access to the `/sql` endpoint for the worker collection and not the collections in the data tier.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc
index 8aac0d2..fe9bb92 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc
@@ -200,7 +200,7 @@ The brackets around a query determine its inclusiveness.
 Here's an example: `count:{1 TO 10]`
 
 Wildcards, `*`, can also be used for either or both endpoints to specify an open-ended range query.
-This is a <<#differences-between-lucenes-classic-query-parser-and-solrs-standard-query-parser,divergence from Lucene's Classic Query Parser>>.
+This is a <<differences-between-lucenes-classic-query-parser-and-solrs-standard-query-parser,divergence from Lucene's Classic Query Parser>>.
 
 * `field:[* TO 100]` finds all field values less than or equal to 100.
 * `field:[100 TO *]` finds all field values greater than or equal to 100.
@@ -253,7 +253,7 @@ Example:
 
 == Querying Specific Fields
 
-Data indexed in Solr is organized in xref:indexing-guide:fields.adoc[fields], which are defined in xref:indexing-guide:schema-element.adoc[a schema].
+Data indexed in Solr is organized in xref:indexing-guide:fields.adoc[fields], which are defined in xref:indexing-guide:schema-elements.adoc[a schema].
 Searches can take advantage of fields to add precision to queries.
 For example, you can search for a term only in a specific field, such as a title field.
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc b/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc
index 322c754..7774a35 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc
@@ -632,7 +632,7 @@ daemonStream.close();
 
 The `delete` function wraps other functions and uses the `id` and `\_version_` values found to send the tuples to a SolrCloud collection as xref:indexing-guide:indexing-with-update-handlers.adoc#delete-operations[Delete By Id] commands.
 
-This is similar to the `<<#update,update()>>` function described below.
+This is similar to the `<<update,update()>>` function described below.
 
 === delete Parameters
 
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-6.adoc b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-6.adoc
index 04fced2..ace04dd 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-6.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-6.adoc
@@ -18,7 +18,8 @@
 
 There are some major changes in Solr 6 to consider before starting to migrate your configurations and indexes.
 
-There are many hundreds of changes, so a thorough review of the <<solr-upgrade-notes.adoc#,Solr Upgrade Notes>> section as well as the {solr-javadocs}/changes//Changes.html[CHANGES.txt] file in your Solr instance will help you plan your migration to Solr 6. This section attempts to highlight some of the major changes you should be aware of.
+There are many hundreds of changes, so a thorough review of the xref:solr-upgrade-notes.adoc[] section as well as the {solr-javadocs}/changes//Changes.html[CHANGES.txt] file in your Solr instance will help you plan your migration to Solr 6.
+This section attempts to highlight some of the major changes you should be aware of.
 
 == Highlights of New Features in Solr 6
 
@@ -27,7 +28,7 @@ Some of the major improvements in Solr 6 include:
 [[major-5-6-streaming]]
 === Streaming Expressions
 
-Introduced in Solr 5, <<streaming-expressions.adoc#,Streaming Expressions>> allow querying Solr and getting results as a stream of data, sorted and aggregated as requested.
+Introduced in Solr 5, xref:query-guide:streaming-expressions.adoc[] allow querying Solr and getting results as a stream of data, sorted and aggregated as requested.
 
 Several new expression types have been added in Solr 6:
 
@@ -40,7 +41,8 @@ Several new expression types have been added in Solr 6:
 [[major-5-6-parallel-sql]]
 === SQL Query
 
-Built on streaming expressions, new in Solr 6 is a <<sql-query.adoc#,SQL support>> to be able to send SQL queries to Solr. SQL statements are compiled to streaming expressions on the fly, providing the full range of aggregations available to streaming expression requests. A JDBC driver is included, which allows using SQL clients and database visualization tools to query your Solr index and import data to other systems.
+Built on streaming expressions, new in Solr 6 is xref:query-guide:sql-query.adoc[SQL support], the ability to send SQL queries to Solr. SQL statements are compiled to streaming expressions on the fly, providing the full range of aggregations available to streaming expression requests.
+A JDBC driver is included, which allows using SQL clients and database visualization tools to query your Solr index and import data to other systems.
 
 
 === Cross Data Center Replication
@@ -49,42 +51,52 @@ Replication across data centers is now possible with Cross Data Center Replicati
 
 === Graph QueryParser
 
-A new <<other-parsers.adoc#graph-query-parser,`graph` query parser>> makes it possible to to graph traversal queries of Directed (Cyclic) Graphs modelled using Solr documents.
+A new xref:query-guide:other-parsers.adoc#graph-query-parser[`graph` query parser] makes it possible to do graph traversal queries of Directed (Cyclic) Graphs modelled using Solr documents.
 
 [[major-5-6-docvalues]]
 === DocValues
 
-Most non-text field types in the Solr sample configsets now default to using <<docvalues.adoc#,DocValues>>.
+Most non-text field types in the Solr sample configsets now default to using xref:indexing-guide:docvalues.adoc[].
 
 == Java 8 Required
 
-The minimum supported version of Java for Solr 6 (and the <<solrj.adoc#,SolrJ client libraries>>) is now Java 8.
+The minimum supported version of Java for Solr 6 (and the xref:deployment-guide:solrj.adoc[SolrJ client libraries]) is now Java 8.
 
 == Index Format Changes
 
-Solr 6 has no support for reading Lucene/Solr 4.x and earlier indexes. Be sure to run the Lucene `IndexUpgrader` included with Solr 5.5 if you might still have old 4x formatted segments in your index. Alternatively: fully optimize your index with Solr 5.5 to make sure it consists only of one up-to-date index segment.
+Solr 6 has no support for reading Lucene/Solr 4.x and earlier indexes.
+Be sure to run the Lucene `IndexUpgrader` included with Solr 5.5 if you might still have old 4x formatted segments in your index.
+Alternatively: fully optimize your index with Solr 5.5 to make sure it consists only of one up-to-date index segment.
 
 == Managed Schema is now the Default
 
-Solr's default behavior when a `solrconfig.xml` does not explicitly define a `<schemaFactory/>` is now dependent on the `luceneMatchVersion` specified in that `solrconfig.xml`. When `luceneMatchVersion < 6.0`, `ClassicIndexSchemaFactory` will continue to be used for back compatibility, otherwise an instance of <<schema-factory.adoc#,`ManagedIndexSchemaFactory`>> will be used.
+Solr's default behavior when a `solrconfig.xml` does not explicitly define a `<schemaFactory/>` is now dependent on the `luceneMatchVersion` specified in that `solrconfig.xml`.
+When `luceneMatchVersion < 6.0`, `ClassicIndexSchemaFactory` will continue to be used for back compatibility, otherwise an instance of xref:configuration-guide:schema-factory.adoc[`ManagedIndexSchemaFactory`] will be used.
 
 The most notable impacts of this change are:
 
 * Existing `solrconfig.xml` files that are modified to use `luceneMatchVersion >= 6.0`, but do _not_ have an explicitly configured `ClassicIndexSchemaFactory`, will have their `schema.xml` file automatically upgraded to a `managed-schema` file.
-* Schema modifications via the <<schema-api.adoc#,Schema API>> will now be enabled by default.
+* Schema modifications via the xref:indexing-guide:schema-api.adoc[] will now be enabled by default.
 
-Please review the <<schema-factory.adoc#,Schema Factory Definition in SolrConfig>> section for more details.
+Please review the xref:configuration-guide:schema-factory.adoc[] section for more details.
 
 == Default Similarity Changes
 
-Solr's default behavior when a Schema does not explicitly define a global <<schema-elements.adoc#similarity,`<similarity/>`>> is now dependent on the `luceneMatchVersion` specified in the `solrconfig.xml`. When `luceneMatchVersion < 6.0`, an instance of `ClassicSimilarityFactory` will be used, otherwise an instance of `SchemaSimilarityFactory` will be used. Most notably this change means that users can take advantage of per Field Type similarity declarations, without needing to also expl [...]
+Solr's default behavior when a Schema does not explicitly define a global xref:indexing-guide:schema-elements.adoc#similarity[`<similarity/>`] is now dependent on the `luceneMatchVersion` specified in the `solrconfig.xml`.
+When `luceneMatchVersion < 6.0`, an instance of `ClassicSimilarityFactory` will be used, otherwise an instance of `SchemaSimilarityFactory` will be used.
+Most notably this change means that users can take advantage of per Field Type similarity declarations, without needing to also explicitly declare a global usage of `SchemaSimilarityFactory`.
 
-Regardless of whether it is explicitly declared, or used as an implicit global default, `SchemaSimilarityFactory` 's implicit behavior when a Field Types do not declare an explicit `<similarity />` has also been changed to depend on the `luceneMatchVersion`. When `luceneMatchVersion < 6.0`, an instance of `ClassicSimilarity` will be used, otherwise an instance of `BM25Similarity` will be used. A `defaultSimFromFieldType` init option may be specified on the `SchemaSimilarityFactory` decla [...]
+Regardless of whether it is explicitly declared, or used as an implicit global default, `SchemaSimilarityFactory` 's implicit behavior when a Field Types do not declare an explicit `<similarity />` has also been changed to depend on the `luceneMatchVersion`.
+When `luceneMatchVersion < 6.0`, an instance of `ClassicSimilarity` will be used, otherwise an instance of `BM25Similarity` will be used.
+A `defaultSimFromFieldType` init option may be specified on the `SchemaSimilarityFactory` declaration to change this behavior.
+Please review the `SchemaSimilarityFactory` javadocs for more details.
 
 == Replica & Shard Delete Command Changes
 
-DELETESHARD and DELETEREPLICA now default to deleting the instance directory, data directory, and index directory for any replica they delete. Please review the <<collections-api.adoc#,Collection API>> documentation for details on new request parameters to prevent this behavior if you wish to keep all data on disk when using these commands.
+DELETESHARD and DELETEREPLICA now default to deleting the instance directory, data directory, and index directory for any replica they delete.
+Please review the xref:configuration-guide:collections-api.adoc[] documentation for details on new request parameters to prevent this behavior if you wish to keep all data on disk when using these commands.
 
 == facet.date.* Parameters Removed
 
-The `facet.date` parameter (and associated `facet.date.*` parameters) that were deprecated in Solr 3.x have been removed completely. If you have not yet switched to using the equivalent <<faceting.adoc#,`facet.range`>> functionality you must do so now before upgrading.
+The `facet.date` parameter (and associated `facet.date.*` parameters) that were deprecated in Solr 3.x have been removed completely.
+If you have not yet switched to using the equivalent xref:query-guide:faceting.adoc[`facet.range`] functionality you must do so now before upgrading.
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-7.adoc b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-7.adoc
index 9d4dfe3..4293deb 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-7.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-7.adoc
@@ -19,28 +19,40 @@
 Solr 7 is a major new release of Solr which introduces new features and a number of other changes that may impact your existing installation.
 
 == Upgrade Planning
-There are major changes in Solr 7 to consider before starting to migrate your configurations and indexes. This page is designed to highlight the biggest changes - new features you may want to be aware of, but also changes in default behavior and deprecated features that have been removed.
+There are major changes in Solr 7 to consider before starting to migrate your configurations and indexes.
+This page is designed to highlight the biggest changes - new features you may want to be aware of, but also changes in default behavior and deprecated features that have been removed.
 
-There are many hundreds of changes in Solr 7, however, so a thorough review of the <<solr-upgrade-notes.adoc#,Solr Upgrade Notes>> as well as the {solr-javadocs}/changes//Changes.html[CHANGES.txt] file in your Solr instance will help you plan your migration to Solr 7. This section attempts to highlight some of the major changes you should be aware of.
+There are many hundreds of changes in Solr 7, however, so a thorough review of the xref:solr-upgrade-notes.adoc[] as well as the {solr-javadocs}/changes//Changes.html[CHANGES.txt] file in your Solr instance will help you plan your migration to Solr 7.
+This section attempts to highlight some of the major changes you should be aware of.
 
-You should also consider all changes that have been made to Solr in any version you have not upgraded to already. For example, if you are currently using Solr 6.2, you should review changes made in all subsequent 6.x releases in addition to changes for 7.0.
+You should also consider all changes that have been made to Solr in any version you have not upgraded to already.
+For example, if you are currently using Solr 6.2, you should review changes made in all subsequent 6.x releases in addition to changes for 7.0.
 
-<<reindexing.adoc#upgrades,Reindexing>> your data is considered the best practice and you should try to do so if possible. However, if reindexing is not feasible, keep in mind you can only upgrade one major version at a time. Thus, Solr 6.x indexes will be compatible with Solr 7 but Solr 5.x indexes will not be.
+xref:indexing-guide:reindexing.adoc#upgrades[Reindexing] your data is considered the best practice and you should try to do so if possible.
+However, if reindexing is not feasible, keep in mind you can only upgrade one major version at a time.
+Thus, Solr 6.x indexes will be compatible with Solr 7 but Solr 5.x indexes will not be.
 
-If you do not reindex now, keep in mind that you will need to either reindex your data or upgrade your indexes before you will be able to move to Solr 8 when it is released in the future. See the section <<indexupgrader-tool.adoc#,IndexUpgrader Tool>> for more details on how to upgrade your indexes.
+If you do not reindex now, keep in mind that you will need to either reindex your data or upgrade your indexes before you will be able to move to Solr 8 when it is released in the future.
+See the section xref:deployment-guide:indexupgrader-tool.adoc[] for more details on how to upgrade your indexes.
 
-See also the section <<upgrading-a-solr-cluster.adoc#,Upgrading a Solr Cluster>> for details on how to upgrade a SolrCloud cluster.
+See also the section xref:deployment-guide:upgrading-a-solr-cluster.adoc[] for details on how to upgrade a SolrCloud cluster.
 
 == New Features & Enhancements
 
 === Replication Modes
-Until Solr 7, the SolrCloud model for replicas has been to allow any replica to become a leader when a leader is lost. This is highly effective for most users, providing reliable failover in case of issues in the cluster. However, it comes at a cost in large clusters because all replicas must be in sync at all times.
+Until Solr 7, the SolrCloud model for replicas has been to allow any replica to become a leader when a leader is lost.
+This is highly effective for most users, providing reliable failover in case of issues in the cluster.
+However, it comes at a cost in large clusters because all replicas must be in sync at all times.
 
-To provide additional flexibility, two new types of replicas have been added, named TLOG & PULL. These new types provide options to have replicas which only sync with the leader by copying index segments from the leader. The TLOG type has an additional benefit of maintaining a transaction log (the "tlog" of its name), which would allow it to recover and become a leader if necessary; the PULL type does not maintain a transaction log, so cannot become a leader.
+To provide additional flexibility, two new types of replicas have been added, named TLOG & PULL.
+These new types provide options to have replicas which only sync with the leader by copying index segments from the leader.
+The TLOG type has an additional benefit of maintaining a transaction log (the "tlog" of its name), which would allow it to recover and become a leader if necessary; the PULL type does not maintain a transaction log, so cannot become a leader.
 
-As part of this change, the traditional type of replica is now named NRT. If you do not explicitly define a number of TLOG or PULL replicas, Solr defaults to creating NRT replicas. If this model is working for you, you will not have to change anything.
+As part of this change, the traditional type of replica is now named NRT.
+If you do not explicitly define a number of TLOG or PULL replicas, Solr defaults to creating NRT replicas.
+If this model is working for you, you will not have to change anything.
 
-See the section <<solrcloud-shards-indexing.adoc#types-of-replicas,Types of Replicas>> for more details on the new replica modes, and how define the replica type in your cluster.
+See the section xref:deployment-guide:solrcloud-shards-indexing.adoc#types-of-replicas[Types of Replicas] for more details on the new replica modes, and how to define the replica type in your cluster.
 
 === Autoscaling
 Solr autoscaling is a new suite of features in Solr to make managing a SolrCloud cluster easier and more automated.
@@ -49,23 +61,25 @@ At its core, Solr autoscaling provides users with a rule syntax to define prefer
 
 === Other Features & Enhancements
 
-* The <<analytics.adoc#,Analytics Component>> has been refactored.
+* The xref:query-guide:analytics.adoc[] has been refactored.
 
 * There were several other new features released in earlier 6.x releases, which you may have missed:
-** <<learning-to-rank.adoc#,Learning to Rank>>
-** <<highlighting.adoc#unified-highlighter,Unified Highlighter>>
-** <<metrics-reporting.adoc#,Metrics API>>. See also information about related deprecations in the section <<JMX Support and MBeans>> below.
-** <<other-parsers.adoc#payload-query-parsers,Payload queries>>
-** <<stream-evaluator-reference.adoc#,Streaming Evaluators>>
-** <<v2-api.adoc#,/v2 API>>
-** <<graph-traversal.adoc#,Graph streaming expressions>>
+** xref:query-guide:learning-to-rank.adoc[]
+** xref:query-guide:highlighting.adoc#unified-highlighter[Unified Highlighter]
+** xref:deployment-guide:metrics-reporting.adoc[Metrics API].
+See also information about related deprecations in the section <<JMX Support and MBeans>> below.
+** xref:query-guide:other-parsers.adoc#payload-query-parsers[Payload queries]
+** xref:query-guide:stream-evaluator-reference.adoc[Streaming Evaluators]
+** xref:configuration-guide:v2-api.adoc[v2 API]
+** xref:query-guide:graph-traversal.adoc[Graph streaming expressions]
 
 == Configuration and Default Changes
 
 === New Default Configset
 Several changes have been made to configsets that ship with Solr; not only their content but how Solr behaves in regard to them:
 
-* The `data_driven_configset` and `basic_configset` have been removed, and replaced by the `_default` configset. The `sample_techproducts_configset` also remains, and is designed for use with the example documents shipped with Solr in the `example/exampledocs` directory.
+* The `data_driven_configset` and `basic_configset` have been removed, and replaced by the `_default` configset.
+The `sample_techproducts_configset` also remains, and is designed for use with the example documents shipped with Solr in the `example/exampledocs` directory.
 * When creating a new collection, if you do not specify a configset, the `_default` will be used.
 ** If you use SolrCloud, the `_default` configset will be automatically uploaded to ZooKeeper.
 ** If you run a user-managed cluster or a single-node installation, the instanceDir will be created automatically, using the `_default` configset as its basis.
@@ -74,14 +88,20 @@ Several changes have been made to configsets that ship with Solr; not only their
 
 To improve the functionality of Schemaless Mode, Solr now behaves differently when it detects that data in an incoming field should have a text-based field type.
 
-* Incoming fields will be indexed as `text_general` by default (you can change this). The name of the field will be the same as the field name defined in the document.
-* A copy field rule will be inserted into your schema to copy the new `text_general` field to a new field with the name `<name>_str`. This field's type will be a `strings` field (to allow for multiple values). The first 256 characters of the text field will be inserted to the new `strings` field.
+* Incoming fields will be indexed as `text_general` by default (you can change this).
+The name of the field will be the same as the field name defined in the document.
+* A copy field rule will be inserted into your schema to copy the new `text_general` field to a new field with the name `<name>_str`.
+This field's type will be a `strings` field (to allow for multiple values).
+The first 256 characters of the text field will be inserted to the new `strings` field.
 
-This behavior can be customized if you wish to remove the copy field rule, or to change the number of characters inserted to the string field, or the field type used. See the section <<schemaless-mode.adoc#,Schemaless Mode>> for details.
+This behavior can be customized if you wish to remove the copy field rule, or to change the number of characters inserted to the string field, or the field type used.
+See the section xref:indexing-guide:schemaless-mode.adoc[] for details.
 
-TIP: Because copy field rules can slow indexing and increase index size, it's recommended you only use copy fields when you need to. If you do not need to sort or facet on a field, you should remove the automatically-generated copy field rule.
+TIP: Because copy field rules can slow indexing and increase index size, it's recommended you only use copy fields when you need to.
+If you do not need to sort or facet on a field, you should remove the automatically-generated copy field rule.
 
-Automatic field creation can be disabled with the `update.autoCreateFields` property. To do this, you can use the Config API with a command such as:
+Automatic field creation can be disabled with the `update.autoCreateFields` property.
+To do this, you can use the Config API with a command such as:
 
 [.dynamic-tabs]
 --
@@ -105,28 +125,40 @@ curl http://host:8983/api/collections/mycollection/config -d '{"set-user-propert
 --
 
 === Changes to Default Behaviors
-* JSON is now the default response format. If you rely on XML responses, you must now define `wt=xml` in your request. In addition, line indentation is enabled by default (`indent=on`).
-* The `sow` parameter (short for "Split on Whitespace") now defaults to `false`, which allows support for multi-word synonyms out of the box. This parameter is used with the eDisMax and standard/"lucene" query parsers. If this parameter is not explicitly specified as `true`, query text will not be split on whitespace before analysis.
-* The `legacyCloud` parameter now defaults to `false`. If an entry for a replica does not exist in `state.json`, that replica will not get registered.
+* JSON is now the default response format.
+If you rely on XML responses, you must now define `wt=xml` in your request.
+In addition, line indentation is enabled by default (`indent=on`).
+* The `sow` parameter (short for "Split on Whitespace") now defaults to `false`, which allows support for multi-word synonyms out of the box.
+This parameter is used with the eDisMax and standard/"lucene" query parsers.
+If this parameter is not explicitly specified as `true`, query text will not be split on whitespace before analysis.
+* The `legacyCloud` parameter now defaults to `false`.
+If an entry for a replica does not exist in `state.json`, that replica will not get registered.
 +
-This may affect users who bring up replicas and they are automatically registered as a part of a shard. It is possible to fall back to the old behavior by setting the property `legacyCloud=true`, in the cluster properties using the following command:
+This may affect users who bring up replicas and they are automatically registered as a part of a shard.
+It is possible to fall back to the old behavior by setting the property `legacyCloud=true`, in the cluster properties using the following command:
 +
 `./server/scripts/cloud-scripts/zkcli.sh -zkhost 127.0.0.1:2181  -cmd clusterprop -name legacyCloud -val true`
-* The eDisMax query parser parameter `lowercaseOperators` now defaults to `false` if the `luceneMatchVersion` in `solrconfig.xml` is 7.0.0 or above. Behavior for `luceneMatchVersion` lower than 7.0.0 is unchanged (so, `true`). This means that clients must sent boolean operators (such as AND, OR and NOT) in upper case in order to be recognized, or you must explicitly set this parameter to `true`.
-* The `handleSelect` parameter in `solrconfig.xml` now defaults to `false` if the `luceneMatchVersion` is 7.0.0 or above. This causes Solr to ignore the `qt` parameter if it is present in a request. If you have request handlers without a leading '/', you can set `handleSelect="true"` or consider migrating your configuration.
+* The eDisMax query parser parameter `lowercaseOperators` now defaults to `false` if the `luceneMatchVersion` in `solrconfig.xml` is 7.0.0 or above.
+Behavior for `luceneMatchVersion` lower than 7.0.0 is unchanged (so, `true`).
+This means that clients must send boolean operators (such as AND, OR and NOT) in upper case in order to be recognized, or you must explicitly set this parameter to `true`.
+* The `handleSelect` parameter in `solrconfig.xml` now defaults to `false` if the `luceneMatchVersion` is 7.0.0 or above.
+This causes Solr to ignore the `qt` parameter if it is present in a request.
+If you have request handlers without a leading '/', you can set `handleSelect="true"` or consider migrating your configuration.
 +
 The `qt` parameter is still used as a SolrJ special parameter that specifies the request handler (tail URL path) to use.
-* The `lucenePlusSort` query parser (aka the "Old Lucene Query Parser") has been deprecated and is no longer implicitly defined. If you wish to continue using this parser until Solr 8 (when it will be removed), you must register it in your `solrconfig.xml`, as in: `<queryParser name="lucenePlusSort" class="solr.OldLuceneQParserPlugin"/>`.
+* The `lucenePlusSort` query parser (aka the "Old Lucene Query Parser") has been deprecated and is no longer implicitly defined.
+If you wish to continue using this parser until Solr 8 (when it will be removed), you must register it in your `solrconfig.xml`, as in: `<queryParser name="lucenePlusSort" class="solr.OldLuceneQParserPlugin"/>`.
 * The name of `TemplateUpdateRequestProcessorFactory` is changed to `template` from `Template` and the name of `AtomicUpdateProcessorFactory` is changed to `atomic` from `Atomic`
 ** Also, `TemplateUpdateRequestProcessorFactory` now uses `{}` instead of `${}` for `template`.
 
-
 == Deprecations and Removed Features
 
 === Point Fields Are Default Numeric Types
-Solr has implemented \*PointField types across the board, to replace Trie* based numeric fields. All Trie* fields are now considered deprecated, and will be removed in Solr 8.
+Solr has implemented \*PointField types across the board, to replace Trie* based numeric fields.
+All Trie* fields are now considered deprecated, and will be removed in Solr 8.
 
-If you are using Trie* fields in your schema, you should consider moving to PointFields as soon as feasible. Changing to the new PointField types will require you to reindex your data.
+If you are using Trie* fields in your schema, you should consider moving to PointFields as soon as feasible.
+Changing to the new PointField types will require you to reindex your data.
 
 === Spatial Fields
 
@@ -143,50 +175,70 @@ Choose one of these field types instead:
 * `SpatialRecursivePrefixTreeField`
 * `RptWithGeometrySpatialField`
 
-See the section <<spatial-search.adoc#,Spatial Search>> for more information.
+See the section xref:query-guide:spatial-search.adoc[] for more information.
 
 === JMX Support and MBeans
 * The `<jmx>` element in `solrconfig.xml` has been removed in favor of `<metrics><reporter>` elements defined in `solr.xml`.
 +
-Limited back-compatibility is offered by automatically adding a default instance of `SolrJmxReporter` if it's missing AND when a local MBean server is found. A local MBean server can be activated either via `ENABLE_REMOTE_JMX_OPTS` in `solr.in.sh` or via system properties, e.g., `-Dcom.sun.management.jmxremote`. This default instance exports all Solr metrics from all registries as hierarchical MBeans.
+Limited back-compatibility is offered by automatically adding a default instance of `SolrJmxReporter` if it's missing AND when a local MBean server is found.
+A local MBean server can be activated either via `ENABLE_REMOTE_JMX_OPTS` in `solr.in.sh` or via system properties, e.g., `-Dcom.sun.management.jmxremote`.
+This default instance exports all Solr metrics from all registries as hierarchical MBeans.
 +
-This behavior can be also disabled by specifying a `SolrJmxReporter` configuration with a boolean init argument `enabled` set to `false`. For a more fine-grained control users should explicitly specify at least one `SolrJmxReporter` configuration.
+This behavior can be also disabled by specifying a `SolrJmxReporter` configuration with a boolean init argument `enabled` set to `false`.
+For a more fine-grained control users should explicitly specify at least one `SolrJmxReporter` configuration.
 +
-See also the section <<metrics-reporting.adoc#the-metrics-reporters-element,The <metrics><reporters> Element>>, which describes how to set up Metrics Reporters in `solr.xml`. Note that back-compatibility support may be removed in Solr 8.
+See also the section xref:deployment-guide:metrics-reporting.adoc#the-metrics-reporters-element[The <metrics><reporters> Element], which describes how to set up Metrics Reporters in `solr.xml`.
+Note that back-compatibility support may be removed in Solr 8.
 
-* MBean names and attributes now follow the hierarchical names used in metrics. This is reflected also in `/admin/mbeans` and `/admin/plugins` output, and can be observed in the UI Plugins tab, because now all these APIs get their data from the metrics API. The old (mostly flat) JMX view has been removed.
+* MBean names and attributes now follow the hierarchical names used in metrics. This is reflected also in `/admin/mbeans` and `/admin/plugins` output, and can be observed in the UI Plugins tab, because now all these APIs get their data from the metrics API.
+The old (mostly flat) JMX view has been removed.
 
 === SolrJ
 The following changes were made in SolrJ.
 
 * `HttpClientInterceptorPlugin` is now `HttpClientBuilderPlugin` and must work with a `SolrHttpClientBuilder` rather than an `HttpClientConfigurer`.
-* `HttpClientUtil` now allows configuring `HttpClient` instances via `SolrHttpClientBuilder` rather than an `HttpClientConfigurer`. Use of env variable `SOLR_AUTHENTICATION_CLIENT_CONFIGURER` no longer works, please use `SOLR_AUTHENTICATION_CLIENT_BUILDER`
-* `SolrClient` implementations now use their own internal configuration for socket timeouts, connect timeouts, and allowing redirects rather than what is set as the default when building the `HttpClient` instance. Use the appropriate setters on the `SolrClient` instance.
+* `HttpClientUtil` now allows configuring `HttpClient` instances via `SolrHttpClientBuilder` rather than an `HttpClientConfigurer`.
+Use of env variable `SOLR_AUTHENTICATION_CLIENT_CONFIGURER` no longer works, please use `SOLR_AUTHENTICATION_CLIENT_BUILDER`
+* `SolrClient` implementations now use their own internal configuration for socket timeouts, connect timeouts, and allowing redirects rather than what is set as the default when building the `HttpClient` instance.
+Use the appropriate setters on the `SolrClient` instance.
 * `HttpSolrClient#setAllowCompression` has been removed and compression must be enabled as a constructor parameter.
-* `HttpSolrClient#setDefaultMaxConnectionsPerHost` and `HttpSolrClient#setMaxTotalConnections` have been removed. These now default very high and can only be changed via parameter when creating an HttpClient instance.
+* `HttpSolrClient#setDefaultMaxConnectionsPerHost` and `HttpSolrClient#setMaxTotalConnections` have been removed.
+These now default very high and can only be changed via parameter when creating an HttpClient instance.
 
 === Other Deprecations and Removals
-* The `defaultOperator` parameter in the schema is no longer supported. Use the `q.op` parameter instead. This option had been deprecated for several releases. See the section <<standard-query-parser.adoc#standard-query-parser-parameters,Standard Query Parser Parameters>> for more information.
-* The `defaultSearchField` parameter in the schema is no longer supported. Use the `df` parameter instead. This option had been deprecated for several releases. See the section <<standard-query-parser.adoc#standard-query-parser-parameters,Standard Query Parser Parameters>> for more information.
-* The `mergePolicy`, `mergeFactor` and `maxMergeDocs` parameters have been removed and are no longer supported. You should define a `mergePolicyFactory` instead. See the section <<index-segments-merging.adoc#mergepolicyfactory,the mergePolicyFactory>> for more information.
-* The PostingsSolrHighlighter has been deprecated. It's recommended that you move to using the UnifiedHighlighter instead. See the section <<highlighting.adoc#unified-highlighter,Unified Highlighter>> for more information about this highlighter.
-* Index-time boosts have been removed from Lucene, and are no longer available from Solr. If any boosts are provided, they will be ignored by the indexing chain. As a replacement, index-time scoring factors should be indexed in a separate field and combined with the query score using a function query. See the section <<function-queries.adoc#,Function Queries>> for more information.
-* The `StandardRequestHandler` is deprecated. Use `SearchHandler` instead.
-* To improve parameter consistency in the Collections API, the parameter names `fromNode` for the MOVEREPLICA command and `source`, `target` for the REPLACENODE command have been deprecated and replaced with `sourceNode` and `targetNode` instead. The old names will continue to work for back-compatibility but they will be removed in Solr 8.
+* The `defaultOperator` parameter in the schema is no longer supported. Use the `q.op` parameter instead. This option had been deprecated for several releases. See the section xref:query-guide:standard-query-parser.adoc#standard-query-parser-parameters[Standard Query Parser Parameters] for more information.
+* The `defaultSearchField` parameter in the schema is no longer supported.
+Use the `df` parameter instead. This option had been deprecated for several releases.
+See the section xref:query-guide:standard-query-parser.adoc#standard-query-parser-parameters[Standard Query Parser Parameters] for more information.
+* The `mergePolicy`, `mergeFactor` and `maxMergeDocs` parameters have been removed and are no longer supported.
+You should define a `mergePolicyFactory` instead. See the section xref:configuration-guide:index-segments-merging.adoc#mergepolicyfactory[mergePolicyFactory] for more information.
+* The PostingsSolrHighlighter has been deprecated. It's recommended that you move to using the UnifiedHighlighter instead.
+See the section xref:query-guide:highlighting.adoc#unified-highlighter[Unified Highlighter] for more information about this highlighter.
+* Index-time boosts have been removed from Lucene, and are no longer available from Solr.
+If any boosts are provided, they will be ignored by the indexing chain.
+As a replacement, index-time scoring factors should be indexed in a separate field and combined with the query score using a function query.
+See the section xref:query-guide:function-queries.adoc[] for more information.
+* The `StandardRequestHandler` is deprecated.
+Use `SearchHandler` instead.
+* To improve parameter consistency in the Collections API, the parameter names `fromNode` for the MOVEREPLICA command and `source`, `target` for the REPLACENODE command have been deprecated and replaced with `sourceNode` and `targetNode` instead.
+The old names will continue to work for back-compatibility but they will be removed in Solr 8.
 * The unused `valType` option has been removed from ExternalFileField, if you have this in your schema you can safely remove it.
 
 == Major Changes in Earlier 6.x Versions
-The following summary of changes in earlier 6.x releases highlights significant changes released between Solr 6.0 and 6.6 that were listed in earlier versions of this Guide. Mentions of deprecations are likely superseded by removal in Solr 7, as noted in the above sections.
+The following summary of changes in earlier 6.x releases highlights significant changes released between Solr 6.0 and 6.6 that were listed in earlier versions of this Guide.
+Mentions of deprecations are likely superseded by removal in Solr 7, as noted in the above sections.
 
 Note again that this is not a complete list of all changes that may impact your installation, so a thorough review of CHANGES.txt is highly recommended if upgrading from any version earlier than 6.6.
 
 * The Solr contribs map-reduce, morphlines-core and morphlines-cell have been removed.
 * JSON Facet API now uses hyper-log-log for numBuckets cardinality calculation and calculates cardinality before filtering buckets by any `mincount` greater than 1.
 * If you use historical dates, specifically on or before the year 1582, you should reindex for better date handling.
-* If you use the JSON Facet API (json.facet) with `method=stream`, you must now set `sort='index asc'` to get the streaming behavior; otherwise it won't stream. Reminder: `method` is a hint that doesn't change defaults of other parameters.
+* If you use the JSON Facet API (json.facet) with `method=stream`, you must now set `sort='index asc'` to get the streaming behavior; otherwise it won't stream.
+Reminder: `method` is a hint that doesn't change defaults of other parameters.
 * If you use the JSON Facet API (json.facet) to facet on a numeric field and if you use `mincount=0` or if you set the prefix, you will now get an error as these options are incompatible with numeric faceting.
 * Solr's logging verbosity at the INFO level has been greatly reduced, and you may need to update the log configs to use the DEBUG level to see all the logging messages you used to see at INFO level before.
-* We are no longer backing up `solr.log` and `solr_gc.log` files in date-stamped copies forever. If you relied on the `solr_log_<date>` or `solr_gc_log_<date>` being in the logs folder that will no longer be the case. See the section <<configuring-logging.adoc#,Configuring Logging>> for details on how log rotation works as of Solr 6.3.
+* We are no longer backing up `solr.log` and `solr_gc.log` files in date-stamped copies forever. If you relied on the `solr_log_<date>` or `solr_gc_log_<date>` being in the logs folder that will no longer be the case.
+See the section xref:deployment-guide:configuring-logging.adoc[] for details on how log rotation works as of Solr 6.3.
 * The create/deleteCollection methods on `MiniSolrCloudCluster` have been deprecated. Clients should instead use the `CollectionAdminRequest` API. In addition, `MiniSolrCloudCluster#uploadConfigDir(File, String)` has been deprecated in favour of `#uploadConfigSet(Path, String)`.
 * The `bin/solr.in.sh` (`bin/solr.in.cmd` on Windows) is now completely commented by default. Previously, this wasn't so, which had the effect of masking existing environment variables.
 * The `\_version_` field is no longer indexed and is now defined with `indexed=false` by default, because the field has DocValues enabled.
@@ -196,7 +248,7 @@ Note again that this is not a complete list of all changes that may impact your
 ** The metrics "75thPctlRequestTime", "95thPctlRequestTime", "99thPctlRequestTime" and "999thPctlRequestTime" in Overseer Status API have been renamed to "75thPcRequestTime", "95thPcRequestTime" and so on for consistency with stats output in other parts of Solr.
 ** The metrics "avgRequestsPerMinute", "5minRateRequestsPerMinute" and "15minRateRequestsPerMinute" have been replaced by corresponding per-second rates viz. "avgRequestsPerSecond", "5minRateRequestsPerSecond" and "15minRateRequestsPerSecond" for consistency with stats output in other parts of Solr.
 * A new highlighter named UnifiedHighlighter has been added. You are encouraged to try out the UnifiedHighlighter by setting `hl.method=unified` and report feedback. It's more efficient/faster than the other highlighters, especially compared to the original Highlighter. See `HighlightParams.java` for a listing of highlight parameters annotated with which highlighters use them. `hl.useFastVectorHighlighter` is now considered deprecated in lieu of `hl.method=fastVector`.
-* The <<caches-warming.adoc#,`maxWarmingSearchers` parameter>> now defaults to 1, and more importantly commits will now block if this limit is exceeded instead of throwing an exception (a good thing). Consequently there is no longer a risk in overlapping commits. Nonetheless users should continue to avoid excessive committing. Users are advised to remove any pre-existing `maxWarmingSearchers` entries from their `solrconfig.xml` files.
-* The <<other-parsers.adoc#complex-phrase-query-parser,Complex Phrase query parser>> now supports leading wildcards. Beware of its possible heaviness, users are encouraged to use ReversedWildcardFilter in index time analysis.
+* The xref:configuration-guide:caches-warming.adoc[`maxWarmingSearchers` parameter] now defaults to 1, and more importantly commits will now block if this limit is exceeded instead of throwing an exception (a good thing). Consequently there is no longer a risk in overlapping commits. Nonetheless users should continue to avoid excessive committing. Users are advised to remove any pre-existing `maxWarmingSearchers` entries from their `solrconfig.xml` files.
+* The xref:query-guide:other-parsers.adoc#complex-phrase-query-parser[Complex Phrase query parser] now supports leading wildcards. Beware of its possible heaviness, users are encouraged to use ReversedWildcardFilter in index time analysis.
 * The JMX metric "avgTimePerRequest" (and the corresponding metric in the metrics API for each handler) used to be a simple non-decaying average based on total cumulative time and the number of requests. The Codahale Metrics implementation applies exponential decay to this value, which heavily biases the average towards the last 5 minutes.
-* Parallel SQL now uses Apache Calcite as its SQL framework. As part of this change the default aggregation mode has been changed to `facet` rather than `map_reduce`. There have also been changes to the SQL aggregate response and some SQL syntax changes. Consult the <<sql-query.adoc#,SQL Query>> documentation for full details.
+* Parallel SQL now uses Apache Calcite as its SQL framework. As part of this change the default aggregation mode has been changed to `facet` rather than `map_reduce`. There have also been changes to the SQL aggregate response and some SQL syntax changes. Consult the xref:query-guide:sql-query.adoc[] documentation for full details.
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-8.adoc b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-8.adoc
index e343e5e..99fade5 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-8.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-8.adoc
@@ -52,7 +52,7 @@ When using this parameter internal requests are sent by using HTTP/1.1.
 ./bin/solr start -c -Dsolr.http1=true -z localhost:2481/solr -s /path/to/solr/home
 ----
 +
-Note the above command *must* be customized for your environment. The section <<solr-control-script-reference.adoc#,Solr Control Script Reference>> has all the possible options. If you are running Solr as a service, you may prefer to review the section <<upgrading-a-solr-cluster.adoc#,Upgrading a Solr Cluster>>.
+Note the above command *must* be customized for your environment. The section xref:deployment-guide:solr-control-script-reference.adoc[] has all the possible options. If you are running Solr as a service, you may prefer to review the section xref:deployment-guide:upgrading-a-solr-cluster.adoc[].
 
 . When all nodes have been upgraded to 8.0, restart each one without the `-Dsolr.http1` parameter.
 
@@ -60,7 +60,7 @@ Note the above command *must* be customized for your environment. The section <<
 
 It is always strongly recommended that you fully reindex your documents after a major version upgrade.
 
-Solr has a new section of the Reference Guide, <<reindexing.adoc#,Reindexing>> which covers several strategies for how to reindex.
+Solr has a new section of the Reference Guide, xref:indexing-guide:reindexing.adoc[] which covers several strategies for how to reindex.
 
 [#new-features-8]
 == New Features & Enhancements
@@ -135,8 +135,8 @@ then you must do so with a delete-by-query technique.
 * Solr has a new field in the `\_default` configset, called `_nest_path_`. This field stores the path of the document
 in the hierarchy for non-root documents.
 
-See the sections <<indexing-nested-documents.adoc#,Indexing Nested Documents>> and
-<<searching-nested-documents.adoc#,Searching Nested Documents>> for more information
+See the sections xref:indexing-guide:indexing-nested-documents.adoc[] and
+xref:query-guide:searching-nested-documents.adoc[] for more information
 and configuration details.
 
 [#config-changes-8]
@@ -153,15 +153,15 @@ The following changes impact how fields behave.
 Note that if you have not specified any similarityFactory in the schema, or use the default
 `SchemaSimilarityFactory`, then `LegacyBM25Similarity` is automatically selected when the value for `luceneMatchVersion` is lower than `8.0.0`.
 +
-See also the section <<schema-elements.adoc#similarity,Similarity>> for more information.
+See also the section xref:indexing-guide:schema-elements.adoc#similarity[Similarity] for more information.
 
 *Memory Codecs Removed*
 
 * Memory codecs have been removed from Lucene (`MemoryPostings`, `MemoryDocValues`) and are no longer available in Solr.
 If you used `postingsFormat="Memory"` or `docValuesFormat="Memory"` on any field or field type configuration then either remove that setting to use the default or experiment with one of the other options.
 +
-For more information on defining a codec, see the section <<codec-factory.adoc#,Codec Factory>>;
-for more information on field properties, see the section <<field-type-definitions-and-properties.adoc#, Field Type Definitions and Properties>>.
+For more information on defining a codec, see the section xref:configuration-guide:codec-factory.adoc[];
+for more information on field properties, see the section xref:indexing-guide:field-type-definitions-and-properties.adoc[].
 
 *LowerCaseTokenizer*
 
@@ -177,7 +177,7 @@ The following changes impact how documents are indexed.
 
 *Index-time Boosts*
 
-* Index-time boosts were removed from <<major-changes-in-solr-7.adoc#other-deprecations-and-removals,Lucene in version 7.0>>, and in Solr 7.x the syntax was still allowed (although it logged a warning in the logs). The syntax was similar to:
+* Index-time boosts were removed from xref:major-changes-in-solr-7.adoc#other-deprecations-and-removals[Lucene in version 7.0], and in Solr 7.x the syntax was still allowed (although it logged a warning in the logs). The syntax was similar to:
 +
 [source,json]
 ----
@@ -192,13 +192,13 @@ This syntax has been removed entirely and if sent to Solr it will now produce an
 The pattern language is very similar but not the same.
 Typically, simply update the pattern by changing an uppercase 'Z' to lowercase 'z' and that's it.
 +
-For the current recommended set of patterns in schemaless mode, see the section <<schemaless-mode.adoc#,Schemaless Mode>>, or simply examine the `_default` configset (found in `server/solr/configsets`).
+For the current recommended set of patterns in schemaless mode, see the section xref:indexing-guide:schemaless-mode.adoc[], or simply examine the `_default` configset (found in `server/solr/configsets`).
 +
 Also note that the default set of date patterns (formats) have expanded from previous releases to subsume those patterns previously handled by the "extract" contrib (Solr Cell / Tika).
 
 *Solr Cell*
 
-* The extraction contrib (<<indexing-with-tika.adoc#,Solr Cell>>) no longer does any date parsing, and thus no longer supports the `date.formats` parameter. To ensure date strings are properly parsed, use the `ParseDateFieldUpdateProcessorFactory` in your update chain. This update request processor is found by default with the "parse-date" update processor when running Solr in "<<schemaless-mode.adoc#set-the-default-updaterequestprocessorchain,schemaless mode>>".
+* The extraction contrib (xref:indexing-guide:indexing-with-tika.adoc[Solr Cell]) no longer does any date parsing, and thus no longer supports the `date.formats` parameter. To ensure date strings are properly parsed, use the `ParseDateFieldUpdateProcessorFactory` in your update chain. This update request processor is found by default with the "parse-date" update processor when running Solr in "xref:indexing-guide:schemaless-mode.adoc#set-the-default-updaterequestprocessorchain[schemaless mode]".
 
 *Langid Contrib*
 
@@ -210,7 +210,7 @@ The following changes impact query behavior.
 
 *Highlighting*
 
-* The Unified Highlighter parameter `hl.weightMatches` now defaults to `true`. See the section <<highlighting.adoc#,Highlighting>> for more information about Highlighter parameters.
+* The Unified Highlighter parameter `hl.weightMatches` now defaults to `true`. See the section xref:query-guide:highlighting.adoc[] for more information about Highlighter parameters.
 
 *eDisMax Query Parser*
 
@@ -218,7 +218,7 @@ The following changes impact query behavior.
 
 *Function Query Parser*
 
-* The <<other-parsers.adoc#function-query-parser,Function Query Parser>> now returns scores that are equal to zero (0) when a negative value is produced. This change is due to the fact that Lucene now requires scores to be positive.
+* The xref:query-guide:other-parsers.adoc#function-query-parser[Function Query Parser] now returns scores that are equal to zero (0) when a negative value is produced. This change is due to the fact that Lucene now requires scores to be positive.
 
 === Authentication & Security Changes in 8.0
 
@@ -277,8 +277,8 @@ When upgrading to Solr 7.7.x, users should be aware of the following major chang
 *Admin UI*
 
 * The Admin UI now presents a login screen for any users with authentication enabled on their cluster.
-Clusters with <<basic-authentication-plugin.adoc#,Basic Authentication>> will prompt users to enter a username and password.
-On clusters configured to use <<kerberos-authentication-plugin.adoc#,Kerberos Authentication>>, authentication is handled transparently by the browser as before, but if authentication fails, users will be directed to configure their browser to provide an appropriate Kerberos ticket.
+Clusters with xref:deployment-guide:basic-authentication-plugin.adoc[Basic Authentication] will prompt users to enter a username and password.
+On clusters configured to use xref:deployment-guide:kerberos-authentication-plugin.adoc[Kerberos Authentication], authentication is handled transparently by the browser as before, but if authentication fails, users will be directed to configure their browser to provide an appropriate Kerberos ticket.
 +
 The login screen's purpose is cosmetic only - Admin UI-triggered Solr requests were subject to authentication prior to 7.7 and still are today.  The login screen changes only the user experience of providing this authentication.
 
@@ -291,7 +291,7 @@ In SolrCloud mode this allow-list is automatically configured to contain all liv
 In a user-managed cluster or a single-node installation the allow-list is empty by default.
 Upgrading users who use the `shards` parameter in these installations can set this value by setting the `shardsWhitelist` property in any `shardHandler` configurations in their `solrconfig.xml` file.
 +
-For more information, see the <<solrcloud-distributed-requests.adoc#configuring-the-shardhandlerfactory,Distributed Request>> documentation.
+For more information, see the xref:deployment-guide:solrcloud-distributed-requests.adoc#configuring-the-shardhandlerfactory[Distributed Request] documentation.
 
 === Solr 7.6
 
@@ -301,7 +301,7 @@ When upgrading to Solr 7.6, users should be aware of the following major changes
 
 *Collections*
 
-* The JSON parameter to set cluster-wide default cluster properties with the <<cluster-node-management.adoc#clusterprop,CLUSTERPROP>> command has changed.
+* The JSON parameter to set cluster-wide default cluster properties with the xref:deployment-guide:cluster-node-management.adoc#clusterprop[CLUSTERPROP] command has changed.
 +
 The old syntax nested the defaults into a property named `clusterDefaults`. The new syntax uses only `defaults`. The command to use is still `set-obj-property`.
 +
@@ -348,7 +348,7 @@ While most users are still encouraged to use the `NRTCachingDirectoryFactory`, w
 +
 For more information about the new directory factory, see the Jira issue https://issues.apache.org/jira/browse/LUCENE-8438[LUCENE-8438].
 +
-For more information about the directory factory configuration in Solr, see the section <<index-location-format.adoc#,DataDir and DirectoryFactory in SolrConfig>>.
+For more information about the directory factory configuration in Solr, see the section xref:configuration-guide:index-location-format.adoc[].
 
 === Solr 7.5
 
@@ -358,12 +358,12 @@ When upgrading to Solr 7.5, users should be aware of the following major changes
 
 *Schema Changes*
 
-* Since Solr 7.0, Solr's schema field-guessing has created `_str` fields for all `_txt` fields, and returned those by default with queries. As of 7.5, `_str` fields will no longer be returned by default. They will still be available and can be requested with the `fl` parameter on queries. See also the section on <<schemaless-mode.adoc#enable-field-class-guessing,field guessing>> for more information about how schema field guessing works.
+* Since Solr 7.0, Solr's schema field-guessing has created `_str` fields for all `_txt` fields, and returned those by default with queries. As of 7.5, `_str` fields will no longer be returned by default. They will still be available and can be requested with the `fl` parameter on queries. See also the section on xref:indexing-guide:schemaless-mode.adoc#enable-field-class-guessing[field guessing] for more information about how schema field guessing works.
 * The Standard Filter, which has been non-operational since at least Solr v4, has been removed.
 
 *Index Merge Policy*
 
-* When using the <<index-segments-merging.adoc#mergepolicyfactory,`TieredMergePolicy`>>, the default merge policy for Solr, `optimize` and `expungeDeletes` now respect the `maxMergedSegmentMB` configuration parameter, which defaults to `5000` (5GB).
+* When using the xref:configuration-guide:index-segments-merging.adoc#mergepolicyfactory[`TieredMergePolicy`], the default merge policy for Solr, `optimize` and `expungeDeletes` now respect the `maxMergedSegmentMB` configuration parameter, which defaults to `5000` (5GB).
 +
 If it is absolutely necessary to control the number of segments present after optimize, specify `maxSegments` as a positive integer. Setting `maxSegments` higher than `1` are honored on a "best effort" basis.
 +
@@ -377,8 +377,7 @@ The `TieredMergePolicy` will also reclaim resources from segments that exceed `m
 
 * Solr's logging configuration file is now located in `server/resources/log4j2.xml` by default.
 
-* A bug for Windows users has been corrected. When using Solr's examples (`bin/solr start -e`) log files will now be put in the correct location (`example/` instead of `server`). See also <<installing-solr.adoc#solr-examples,Solr Examples>> and <<solr-control-script-reference.adoc#,Solr Control Script Reference>> for more information.
-
+* A bug for Windows users has been corrected. When using Solr's examples (`bin/solr start -e`) log files will now be put in the correct location (`example/` instead of `server`). See also xref:deployment-guide:installing-solr.adoc#solr-examples[Solr Examples] and xref:deployment-guide:solr-control-script-reference.adoc[] for more information.
 
 === Solr 7.4
 
@@ -388,13 +387,13 @@ When upgrading to Solr 7.4, users should be aware of the following major changes
 
 *Logging*
 
-* Solr now uses Log4j v2.11. The Log4j configuration is now in `log4j2.xml` rather than `log4j.properties` files. This is a server side change only and clients using SolrJ won't need any changes. Clients can still use any logging implementation which is compatible with SLF4J. We now let Log4j handle rotation of Solr logs at startup, and `bin/solr` start scripts will no longer attempt this nor move existing console or garbage collection logs into `logs/archived` either. See <<configuring- [...]
+* Solr now uses Log4j v2.11. The Log4j configuration is now in `log4j2.xml` rather than `log4j.properties` files. This is a server side change only and clients using SolrJ won't need any changes. Clients can still use any logging implementation which is compatible with SLF4J. We now let Log4j handle rotation of Solr logs at startup, and `bin/solr` start scripts will no longer attempt this nor move existing console or garbage collection logs into `logs/archived` either. See xref:deploymen [...]
 
 * Configuring `slowQueryThresholdMillis` now logs slow requests to a separate file named `solr_slow_requests.log`. Previously they would get logged in the `solr.log` file.
 
 *User-Managed Clusters*
 
-* In the <<user-managed-index-replication.adoc#,leader-follower model>> of scaling Solr, a follower no longer commits an empty index when a completely new index is detected on leader during replication. To return to the previous behavior pass `false` to `skipCommitOnLeaderVersionZero` in the follower section of replication handler configuration, or pass it to the `fetchindex` command.
+* In the xref:deployment-guide:user-managed-index-replication.adoc[leader-follower model] of scaling Solr, a follower no longer commits an empty index when a completely new index is detected on leader during replication. To return to the previous behavior pass `false` to `skipCommitOnLeaderVersionZero` in the follower section of replication handler configuration, or pass it to the `fetchindex` command.
 
 If you are upgrading from a version earlier than Solr 7.3, please see previous version notes below.
 
@@ -410,7 +409,7 @@ When upgrading to Solr 7.3, users should be aware of the following major changes
 
 *Learning to Rank*
 
-* The `rq` parameter used with Learning to Rank `rerank` query parsing no longer considers the `defType` parameter. See <<learning-to-rank.adoc#running-a-rerank-query,Running a Rerank Query>> for more information about this parameter.
+* The `rq` parameter used with Learning to Rank `rerank` query parsing no longer considers the `defType` parameter. See xref:query-guide:learning-to-rank.adoc#running-a-rerank-query[Running a Rerank Query] for more information about this parameter.
 
 *Autoscaling & AutoAddReplicas*
 
@@ -422,7 +421,7 @@ When upgrading to Solr 7.3, users should be aware of the following major changes
 
 *Logging*
 
-* The default Solr log file size and number of backups have been raised to 32MB and 10 respectively. See the section <<configuring-logging.adoc#,Configuring Logging>> for more information about how to configure logging.
+* The default Solr log file size and number of backups have been raised to 32MB and 10 respectively. See the section xref:deployment-guide:configuring-logging.adoc[] for more information about how to configure logging.
 
 *SolrCloud*
 
@@ -430,11 +429,11 @@ When upgrading to Solr 7.3, users should be aware of the following major changes
 +
 This means to upgrade to Solr 8 in the future, you will need to be on Solr 7.3 or higher.
 
-* Replicas which are not up-to-date are no longer allowed to become leader. Use the <<shard-management.adoc#forceleader,FORCELEADER command>> of the Collections API to allow these replicas become leader.
+* Replicas which are not up-to-date are no longer allowed to become leader. Use the xref:deployment-guide:shard-management.adoc#forceleader[FORCELEADER command] of the Collections API to allow these replicas become leader.
 
 *Spatial*
 
-* If you are using the spatial JTS library with Solr, you must upgrade to 1.15.0. This new version of JTS is now dual-licensed to include a BSD style license. See the section on <<spatial-search.adoc#,Spatial Search>> for more information.
+* If you are using the spatial JTS library with Solr, you must upgrade to 1.15.0. This new version of JTS is now dual-licensed to include a BSD style license. See the section on xref:query-guide:spatial-search.adoc[] for more information.
 
 *Highlighting*
 
@@ -450,7 +449,7 @@ When upgrading to Solr 7.2, users should be aware of the following major changes
 
 *Local Params*
 
-* Starting a query string with <<local-params.adoc#,local params>> `{!myparser ...}` is used to switch from one query parser to another, and is intended for use by Solr system developers, not end users doing searches. To reduce negative side-effects of unintended hack-ability, Solr now limits the cases when local params will be parsed to only contexts in which the default parser is "<<standard-query-parser.adoc#,lucene>>" or "<<other-parsers.adoc#function-query-parser,func>>".
+* Starting a query string with xref:query-guide:local-params.adoc[] `{!myparser ...}` is used to switch from one query parser to another, and is intended for use by Solr system developers, not end users doing searches. To reduce negative side-effects of unintended hack-ability, Solr now limits the cases when local params will be parsed to only contexts in which the default parser is xref:query-guide:standard-query-parser.adoc[`lucene`] or xref:query-guide:other-parsers.adoc#function-quer [...]
 +
 So, if `defType=edismax` then `q={!myparser ...}` won't work. In that example, put the desired query parser into the `defType` parameter.
 +
@@ -480,7 +479,7 @@ When upgrading to Solr 7.1, users should be aware of the following major changes
 +
 Existing users of this feature should not have to change anything. However, they should note these changes:
 
-** Behavior: Changing the `autoAddReplicas` property from disabled (`false`) to enabled (`true`) using <<collection-management.adoc#modifycollection,MODIFYCOLLECTION API>> no longer replaces down replicas for the collection immediately. Instead, replicas are only added if a node containing them went down while `autoAddReplicas` was enabled. The parameters `autoReplicaFailoverBadNodeExpiration` and `autoReplicaFailoverWorkLoopDelay` are no longer used.
+** Behavior: Changing the `autoAddReplicas` property from disabled (`false`) to enabled (`true`) using xref:deployment-guide:collection-management.adoc#modifycollection[MODIFYCOLLECTION API] no longer replaces down replicas for the collection immediately. Instead, replicas are only added if a node containing them went down while `autoAddReplicas` was enabled. The parameters `autoReplicaFailoverBadNodeExpiration` and `autoReplicaFailoverWorkLoopDelay` are no longer used.
 ** Deprecations: Enabling/disabling autoAddReplicas cluster-wide with the API will be deprecated; use suspend/resume trigger APIs with `name=".auto_add_replicas"` instead.
 
 *Metrics Reporters*
@@ -507,4 +506,4 @@ See the section in Metrics Reporting: Shard and Cluster Reporters for more infor
 
 * In the XML query parser (`defType=xmlparser` or `{!xmlparser ... }`) the resolving of external entities is now disallowed by default.
 
-If you are upgrading from a version earlier than Solr 7.0, please see <<major-changes-in-solr-7.adoc#,Major Changes in Solr 7>> before starting your upgrade.
+If you are upgrading from a version earlier than Solr 7.0, please see xref:major-changes-in-solr-7.adoc[] before starting your upgrade.
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
index cd8c41e..4d7e0d6 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
@@ -44,7 +44,7 @@ Also removes support for cluster property `legacyCloud` (as if always false now)
 Otherwise, SolrJ will not be able to connect to the cluster once it has upgraded to Solr 9.
 Once you have upgraded all Solr clusters that the client is connecting to, you can upgrade the SolrJ client to 9.x.
 
-* If you're using Solr in standalone mode with the <<query-elevation-component.adoc#,Query Elevation Component>> with it's elevation file in the data directory, you'll have to move it to the <<config-sets.adoc#,Configset>> instead.
+* If you're using Solr in standalone mode with the xref:query-guide:query-elevation-component.adoc[] with its elevation file in the data directory, you'll have to move it to the xref:configuration-guide:config-sets.adoc[Configset] instead.
 The only reason QEC supported the data directory was to support loading its changes on a commit instead of a more expensive core reload.
 That feature now works from the configset dir too.
 SolrCloud doesn't support that but may sometime.
@@ -128,14 +128,14 @@ Any custom response writer extending `TextResponseWriter` will need to implement
 
 === solr.xml maxBooleanClauses now enforced recursively
 
-Lucene 9.0 has additional safety checks over previous versions that impact how the `solr.xml` global `<<configuring-solr-xml#global-maxbooleanclauses,maxBooleanClauses>>` option is enforced.
+Lucene 9.0 has additional safety checks over previous versions that impact how the `solr.xml` global xref:configuration-guide:configuring-solr-xml.adoc#global-maxbooleanclauses[`maxBooleanClauses`] option is enforced.
 
 In previous versions of Solr, this option was a hard limit on the number of clauses in any `BooleanQuery` object - but it was only enforced for the _direct_ clauses.
 Starting with Solr 9, this global limit is now also enforced against the total number of clauses in a _nested_ query structure.
 
 Users who upgrade from prior versions of Solr may find that some requests involving complex internal query structures (Example: long query strings using `edismax` with many `qf` and `pf` fields that include query time synonym expansion) which worked in the past now hit this limit and fail.
 
-User's in this situation are advised to consider the complexity f their queries/configuration, and increase the value of `<<configuring-solr-xml#global-maxbooleanclauses,maxBooleanClauses>>` if warranted.
+Users in this situation are advised to consider the complexity of their queries/configuration, and increase the value of xref:configuration-guide:configuring-solr-xml.adoc#global-maxbooleanclauses[`maxBooleanClauses`] if warranted.
 
 === Log4J configuration & Solr MDC values
 
@@ -174,7 +174,7 @@ What percentage of them get reported to a tracing server is up to you.
 This change is backward incompatible.
 If you need the pre-9.0 default behavior, you need to explicitly set `blockUnknown:false` in `security.json`.
 
-* The allow-list defining allowed URLs for the `shards` parameter is not in the `shardHandler` configuration anymore. It is defined by the `allowUrls` top-level property of the `solr.xml` file. For more information, see <<configuring-solr-xml.adoc#allow-urls, Format of solr.allowUrls>> documentation.
+* The allow-list defining allowed URLs for the `shards` parameter is not in the `shardHandler` configuration anymore. It is defined by the `allowUrls` top-level property of the `solr.xml` file. For more information, see xref:configuration-guide:configuring-solr-xml.adoc#allow-urls[Format of solr.allowUrls] documentation.
 
 * SOLR-13985: Solr's Jetty now binds to localhost network interface by default for better out of the box security.
 Administrators that need Solr exposed more broadly can change the SOLR_JETTY_HOST property in their Solr include (solr.in.sh/solr.in.cmd) file.
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/pages/solr-upgrade-notes.adoc b/solr/solr-ref-guide/modules/upgrade-notes/pages/solr-upgrade-notes.adoc
index e6b5802..60d2f87 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/pages/solr-upgrade-notes.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/pages/solr-upgrade-notes.adoc
@@ -60,21 +60,22 @@ The `MultiAuthRuleBasedAuthorizationPlugin` is used when the `MultiAuthPlugin` i
 
 For information on configuring these plugins, see the following sections:
 
-* <<basic-authentication-plugin.adoc#combining-basic-authentication-with-other-schemes,Combining Basic Authentication with Other Schemes>>
-* <<rule-based-authorization-plugin.adoc#multiple-authorization-plugins,Multiple Authorization Plugins>>
+* xref:deployment-guide:basic-authentication-plugin.adoc#combining-basic-authentication-with-other-schemes[Combining Basic Authentication with Other Schemes]
+* xref:deployment-guide:rule-based-authorization-plugin.adoc#multiple-authorization-plugins[Multiple Authorization Plugins]
 
 
 *ZooKeeper chroot*
 
 It's now possible to create the ZooKeeper chroot at startup if it does not already exist.
-See the section <<zookeeper-ensemble.adoc#using-the-z-parameter-with-binsolr,Using the -z Parameter with bin/solr>> for an example.
+See the section xref:deployment-guide:zookeeper-ensemble.adoc#using-the-z-parameter-with-binsolr[Using the -z Parameter with bin/solr] for an example.
 
 *Other Changes*
 
 A few other minor changes are worth noting:
 
-* The `config-read` pre-defined permission now correctly governs access for various configuration-related APIs. See also <<rule-based-authorization-plugin.adoc#predefined-permissions,Predefined Permissions>>.
-* The S3BackupRepository supports configuring the AWS Profile, if necessary. See also <<backup-restore.adoc#s3backuprepository,S3BackupRepository>>.
+* The `config-read` pre-defined permission now correctly governs access for various configuration-related APIs.
+See also xref:deployment-guide:rule-based-authorization-plugin.adoc#predefined-permissions[Predefined Permissions].
+* The S3BackupRepository supports configuring the AWS Profile, if necessary. See also xref:deployment-guide:backup-restore.adoc#s3backuprepository[S3BackupRepository].
 * Additionally, backups will now properly succeed after SPLITSHARD operations, and will correctly handle incremental backup purges.
 * SolrJ now supports uploading configsets.
 
@@ -100,13 +101,13 @@ The designer screen provides a safe environment for you to:
 * Test how schema changes will impact query-time behavior.
 * Save your changes to a configset to use with a new collection.
 
-See the section <<schema-designer.adoc#,Schema Designer>> for full details.
+See the section xref:indexing-guide:schema-designer.adoc[] for full details.
 
 *Backups in S3*
 
 Following the redesign of backups in Solr 8.8 that allowed storage of incremental backups in Google Cloud environments, Solr 8.10 provides support for storing backups in Amazon S3 buckets.
 
-See the section <<backup-restore.adoc#s3backuprepository,S3BackupRepository>> for how to configure.
+See the section xref:deployment-guide:backup-restore.adoc#s3backuprepository[S3BackupRepository] for how to configure.
 
 *Security Admin UI*
 
@@ -115,7 +116,7 @@ Solr's Admin UI also got a new screen to support management of users, roles, and
 The new UI works when authentication and/or authorization has been enabled with `bin/solr auth` or by manually installing a `security.json` file.
 Before this, it provides a warning that your Solr instance is unsecured.
 
-See the section <<security-ui.adoc#,Security UI>> for details.
+See the section xref:deployment-guide:security-ui.adoc[] for details.
 
 *Solr SQL Improvements*
 
@@ -132,12 +133,12 @@ A number of improvements have been made in Solr's SQL functionality:
 A new option for the `shards.preference` parameter allows selection of nodes based on whether or not the replica is a leader.
 Now adding `shards.preference=replica.leader:false` will limit queries only to replicas which are not currently their shard's leader.
 
-See the section <<solrcloud-distributed-requests.adoc#shards-preference-parameter,shards.preference Parameter>> for details and examples.
+See the section xref:deployment-guide:solrcloud-distributed-requests.adoc#shards-preference-parameter[shards.preference Parameter] for details and examples.
 
 *Metrics & Prometheus Exporter*
 
 A new `expr` option in the Metrics API allows for more advanced filtering of metrics based on regular expressions.
-See the section <<metrics-reporting.adoc#metrics-api,Metrics API>> for examples.
+See the section xref:deployment-guide:metrics-reporting.adoc#metrics-api[Metrics API] for examples.
 
 The Prometheus Exporter's default `solr-exporter.config` has been improved to use the new `expr` option in the Metrics API to get a smaller set of metrics.
 The default metrics exported still include most metrics, but the configuration will be easier to trim as needed.
@@ -146,7 +147,7 @@ This should help provide performance improvements in busy clusters being monitor
 *ZooKeeper Credentials*
 
 ZooKeeper credentials can now be stored in a file whose location is defined with a system property instead of being passed in plain-text.
-See <<zookeeper-access-control.adoc#out-of-the-box-credential-implementations,Out of the Box Credential Implementations>> for how to set this up.
+See xref:deployment-guide:zookeeper-access-control.adoc#out-of-the-box-credential-implementations[Out of the Box Credential Implementations] for how to set this up.
 
 === Solr 8.9
 
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/upgrade-nav.adoc b/solr/solr-ref-guide/modules/upgrade-notes/upgrade-nav.adoc
index 844b76a..3ee8638 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/upgrade-nav.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/upgrade-nav.adoc
@@ -1,4 +1,3 @@
-.Solr Upgrade Notes
 * xref:solr-upgrade-notes.adoc[]
 ** xref:major-changes-in-solr-9.adoc[]
 ** xref:major-changes-in-solr-8.adoc[]
diff --git a/solr/solr-ref-guide/package-lock.json b/solr/solr-ref-guide/package-lock.json
index 7e2dfa5..546befa 100644
--- a/solr/solr-ref-guide/package-lock.json
+++ b/solr/solr-ref-guide/package-lock.json
@@ -328,9 +328,9 @@
           "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA=="
         },
         "sonic-boom": {
-          "version": "2.3.1",
-          "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-2.3.1.tgz",
-          "integrity": "sha512-o0vJPsRiCW5Q0EmRKjNiiYGy2DqSXcxk4mY9vIBSPwmkH/e/vJ2Tq8EECd5NTiO77x8vlVN+ykDjRQJTqf7eKg==",
+          "version": "2.3.2",
+          "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-2.3.2.tgz",
+          "integrity": "sha512-qr2XGoMP23j85LfqOMxBEunlJ5P66hhy+3CLLTFtoLHYJuG89Qs+zUZC+d4xH1Jd4lfT7IkGaNa1z5kJt15fjA==",
           "requires": {
             "atomic-sleep": "^1.0.0"
           }

[solr] 02/04: Fix all refs in the query guide + move 'pure nav' pages aside

Posted by ct...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ctargett pushed a commit to branch jira/solr-15556-antora
in repository https://gitbox.apache.org/repos/asf/solr.git

commit 3c360f0d25137feb7262b0e1a319782dae465c10
Author: Cassandra Targett <ct...@apache.org>
AuthorDate: Fri Nov 26 21:17:13 2021 -0600

    Fix all refs in the query guide + move 'pure nav' pages aside
---
 solr/solr-ref-guide/antora.yml                     |   1 +
 .../pages/analytics-expression-sources.adoc        |   8 +-
 .../pages/analytics-mapping-functions.adoc         |   4 +-
 .../pages/analytics-reduction-functions.adoc       |   8 +-
 .../modules/query-guide/pages/analytics.adoc       |  30 ++--
 .../query-guide/pages/block-join-query-parser.adoc |   6 +-
 .../pages/collapse-and-expand-results.adoc         |  12 +-
 .../query-guide/pages/common-query-parameters.adoc |  34 ++---
 .../query-guide/pages/dismax-query-parser.adoc     |   6 +-
 .../query-guide/pages/document-transformers.adoc   |  10 +-
 .../query-guide/pages/edismax-query-parser.adoc    |  16 +--
 .../query-guide/pages/exporting-result-sets.adoc   |  12 +-
 .../modules/query-guide/pages/faceting.adoc        |  22 +--
 .../query-guide/pages/function-queries.adoc        |  13 +-
 .../modules/query-guide/pages/graph-traversal.adoc |   6 +-
 .../modules/query-guide/pages/graph.adoc           |   2 +-
 .../modules/query-guide/pages/highlighting.adoc    |   6 +-
 .../modules/query-guide/pages/jdbc-zeppelin.adoc   |   2 +-
 .../query-guide/pages/join-query-parser.adoc       |   2 +-
 .../modules/query-guide/pages/json-facet-api.adoc  |  22 +--
 .../pages/json-faceting-domain-changes.adoc        |  16 +--
 .../modules/query-guide/pages/json-query-dsl.adoc  |  18 +--
 .../query-guide/pages/json-request-api.adoc        |   6 +-
 .../query-guide/pages/learning-to-rank.adoc        |  10 +-
 .../modules/query-guide/pages/local-params.adoc    |   2 +-
 .../modules/query-guide/pages/logs.adoc            |   4 +-
 .../query-guide/pages/machine-learning.adoc        |   4 +-
 .../query-guide/pages/math-expressions.adoc        |  91 ++++--------
 .../modules/query-guide/pages/math-start.adoc      |   2 +-
 .../modules/query-guide/pages/matrix-math.adoc     |   2 +-
 .../modules/query-guide/pages/morelikethis.adoc    |  18 +--
 .../modules/query-guide/pages/other-parsers.adoc   |  34 ++---
 .../query-guide/pages/pagination-of-results.adoc   |  10 +-
 .../pages/query-elevation-component.adoc           |   6 +-
 .../query-guide/pages/query-re-ranking.adoc        |  10 +-
 .../modules/query-guide/pages/query-screen.adoc    |  22 +--
 .../pages/query-syntax-and-parsers.adoc            |  32 ++---
 .../query-guide/pages/response-writers.adoc        |   8 +-
 .../query-guide/pages/result-clustering.adoc       |   2 +-
 .../modules/query-guide/pages/result-grouping.adoc |   8 +-
 .../modules/query-guide/pages/search-sample.adoc   |   6 +-
 .../pages/searching-nested-documents.adoc          |  10 +-
 .../modules/query-guide/pages/simulations.adoc     |   2 +-
 .../modules/query-guide/pages/spatial-search.adoc  |  16 +--
 .../modules/query-guide/pages/spell-checking.adoc  |   6 +-
 .../modules/query-guide/pages/sql-query.adoc       |  18 +--
 .../modules/query-guide/pages/sql-screen.adoc      |   2 +-
 .../query-guide/pages/standard-query-parser.adoc   |  13 +-
 .../modules/query-guide/pages/stats-component.adoc |   6 +-
 .../modules/query-guide/pages/stream-api.adoc      |   2 +-
 .../pages/stream-decorator-reference.adoc          |  20 +--
 .../pages/stream-evaluator-reference.adoc          |   2 +-
 .../modules/query-guide/pages/stream-screen.adoc   |   4 +-
 .../query-guide/pages/stream-source-reference.adoc |  10 +-
 .../query-guide/pages/streaming-expressions.adoc   |  16 +--
 .../modules/query-guide/pages/suggester.adoc       |   8 +-
 .../modules/query-guide/pages/tagger-handler.adoc  |   2 +-
 .../query-guide/pages/term-vector-component.adoc   |   2 +-
 .../modules/query-guide/pages/terms-component.adoc |   6 +-
 .../modules/query-guide/querying-nav.adoc          | 159 ++++++++++-----------
 .../old-pages}/controlling-results.adoc            |   0
 .../pages => src/old-pages}/enhancing-queries.adoc |   0
 .../pages => src/old-pages}/query-guide.adoc       |   0
 63 files changed, 399 insertions(+), 438 deletions(-)

diff --git a/solr/solr-ref-guide/antora.yml b/solr/solr-ref-guide/antora.yml
index 4a0c208..c4e088f 100644
--- a/solr/solr-ref-guide/antora.yml
+++ b/solr/solr-ref-guide/antora.yml
@@ -22,3 +22,4 @@ asciidoc:
     solr-docs-version: '9.0'
     idseparator: '-'
     idprefix: ''
+    toc: ~
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/analytics-expression-sources.adoc b/solr/solr-ref-guide/modules/query-guide/pages/analytics-expression-sources.adoc
index 8e82344..267762b 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/analytics-expression-sources.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/analytics-expression-sources.adoc
@@ -16,16 +16,16 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Expression sources are the source of the data being aggregated in <<analytics.adoc#expressions,analytics expressions>>.
+Expression sources are the source of the data being aggregated in xref:analytics.adoc#expressions[analytics expressions].
 
 These sources can be either Solr fields indexed with docValues, or constants.
 
 == Supported Field Types
 
-The following <<field-types-included-with-solr.adoc#, Solr field types>> are supported.
+The following xref:indexing-guide:field-types-included-with-solr.adoc[Solr field types] are supported.
 Fields of these types can be either multi-valued and single-valued.
 
-All fields used in analytics expressions *must* have <<docvalues.adoc#,docValues>> enabled.
+All fields used in analytics expressions *must* have xref:indexing-guide:docvalues.adoc[] enabled.
 
 
 // Since Trie* fields are deprecated as of 7.0, we should consider removing Trie* fields from this list...
@@ -79,7 +79,7 @@ There are two possible ways of specifying constant strings, as shown below.
 
 Dates can be specified in the same way as they are in Solr queries.
 Just use ISO-8601 format.
-For more information, refer to the <<date-formatting-math.adoc#,Working with Dates>> section.
+For more information, refer to xref:indexing-guide:date-formatting-math.adoc[].
 
 * `2017-07-17T19:35:08Z`
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/analytics-mapping-functions.adoc b/solr/solr-ref-guide/modules/query-guide/pages/analytics-mapping-functions.adoc
index ab4367c..b1a2e7b 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/analytics-mapping-functions.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/analytics-mapping-functions.adoc
@@ -318,7 +318,7 @@ Explicitly converts the values of a `String` or `Long` expression into `Dates`.
 [[analytics-date-math]]
 === Date Math
 Compute the given date math strings for the values of a `Date` expression.
-The date math strings *must* be <<analytics-expression-sources.adoc#strings, constant>>.
+The date math strings *must* be xref:analytics-expression-sources.adoc#strings[constant].
 
 `date_math(< _Date_ >, < _Constant String_ >...)` => `< _Date_ >`::
     * `date_math(1800-04-15, '+1DAY', '-1MONTH')` => `1800-03-16`
@@ -348,7 +348,7 @@ Concatenations the values of the `String` expression(s) together.
     _Empty values are ignored_
 
 === Separated Concatenation
+Concatenates the values of the `String` expression(s) together using the given xref:analytics-expression-sources.adoc#strings[constant string] value as a separator.
+Concatenations the values of the `String` expression(s) together using the given xref:analytics-expression-sources.adoc#strings[constant string] value as a separator.
 
 `concat_sep(< _Constant String_ >, < _Multi String_ >)` => `< _Single String_ >`::
     * `concat_sep('-', ['a','b'])` => `'a-b'`
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/analytics-reduction-functions.adoc b/solr/solr-ref-guide/modules/query-guide/pages/analytics-reduction-functions.adoc
index b6f57d8..25255de 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/analytics-reduction-functions.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/analytics-reduction-functions.adoc
@@ -17,8 +17,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Reduction functions reduce the values of <<analytics-expression-sources.adoc#,sources>>
-and/or unreduced <<analytics-mapping-functions.adoc#,mapping functions>>
+Reduction functions reduce the values of xref:analytics-expression-sources.adoc[sources]
+and/or unreduced xref:analytics-mapping-functions.adoc[mapping functions]
 for every Solr Document to a single value.
 
 Below is a list of all reduction functions provided by the Analytics Component.
@@ -109,14 +109,14 @@ This function accepts `Numeric` and `Date` expressions.
 Calculates the given percentile of all values for the expression.
 This function accepts `Numeric`, `Date` and `String` expressions for the 2^nd^ parameter.
 
-The percentile, given as the 1^st^ parameter, must be a <<analytics-expression-sources.adoc#numeric,constant double>> between [0, 100).
+The percentile, given as the 1^st^ parameter, must be a xref:analytics-expression-sources.adoc#numeric[constant double] between [0, 100).
 
 `percentile(<Constant Double>, < T >)` => `< _Single_ T >`
 
 === Ordinal
 Calculates the given ordinal of all values for the expression.
 This function accepts `Numeric`, `Date` and `String` expressions for the 2^nd^ parameter.
-The ordinal, given as the 1^st^ parameter, must be a <<analytics-expression-sources.adoc#numeric,constant integer>>.
+The ordinal, given as the 1^st^ parameter, must be a xref:analytics-expression-sources.adoc#numeric[constant integer].
 *0 is not accepted as an ordinal value.*
 
 If the ordinal is positive, the returned value will be the _n_^th^ smallest value.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/analytics.adoc b/solr/solr-ref-guide/modules/query-guide/pages/analytics.adoc
index 2e79e9d..359523b 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/analytics.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/analytics.adoc
@@ -32,9 +32,9 @@ Since the Analytics framework is a _search component_, it must be declared as su
 For distributed analytics requests over cloud collections, the component uses the `AnalyticsHandler` strictly for inter-shard communication.
 The Analytics Handler should not be used by users to submit analytics requests.
 
-To use the Analytics Component, the first step is to install this contrib module's plugins into Solr -- see the <<solr-plugins.adoc#installing-plugins,Solr Plugins>> section on how to do this.
+To use the Analytics Component, the first step is to install this contrib module's plugins into Solr -- see the xref:configuration-guide:solr-plugins.adoc#installing-plugins[Installing Plugins] section on how to do this.
 Note: Method with `<lib/>` directive doesn't work.
-Instead copy `${solr.install.dir}/dist/solr-analytics-x.x.x.jar` to `${solr.install.dir}/server/solr-webapp/webapp/WEB-INF/lib/`, as described in the <<libs.adoc#lib-directories,lib directories documentation>>.
+Instead copy `${solr.install.dir}/dist/solr-analytics-x.x.x.jar` to `${solr.install.dir}/server/solr-webapp/webapp/WEB-INF/lib/`, as described in the xref:configuration-guide:libs.adoc[].
 
 Next you need to register the request handler and search component.
 Add the following lines to `solrconfig.xml`, near the defintions for other request handlers:
@@ -60,14 +60,14 @@ For these changes to take effect, restart Solr or reload the core or collection.
 == Request Syntax
 
 An Analytics request is passed to Solr with the parameter `analytics` in a request sent to the
-<<requesthandlers-searchcomponents.adoc#search-handlers,Search Handler>>.
+xref:configuration-guide:requesthandlers-searchcomponents.adoc#search-handlers[search handler].
 Since the analytics request is sent inside of a search handler request, it will compute results based on the result set determined by the search handler.
 
 For example, this curl command encodes and POSTs a simple analytics request to the search handler:
 
 [source,bash]
 ----
-curl --data-binary 'analytics={
+$ curl --data-binary 'analytics={
    "expressions" : {
       "revenue" : "sum(mult(price,quantity))"
       }
@@ -162,14 +162,14 @@ The ways that these can be defined are described below.
 
 Sources::
 * Constants: The values defined in the expression.
-The supported constant types are described in the <<analytics-expression-sources.adoc#constants, Analytics Expression Source Reference>>.
+The supported constant types are described in the section xref:analytics-expression-sources.adoc#constants[Constants].
 
 * Fields: Solr fields that are read from the index.
-The supported fields are listed in the <<analytics-expression-sources.adoc#supported-field-types, Analytics Expression Source Reference>>.
+The supported fields are listed in the section xref:analytics-expression-sources.adoc#supported-field-types[Supported Field Types].
 
 Mapping Functions::
 Mapping functions map values for each Solr Document or Reduction.
-The provided mapping functions are detailed in the <<analytics-mapping-functions.adoc#,Analytics Mapping Function Reference>>.
+The provided mapping functions are detailed in the xref:analytics-mapping-functions.adoc[].
 
 * Unreduced Mapping: Mapping a Field with another Field or Constant returns a value for every Solr Document.
 Unreduced mapping functions can take fields, constants as well as other unreduced mapping functions as input.
@@ -178,7 +178,7 @@ Unreduced mapping functions can take fields, constants as well as other unreduce
 
 Reduction Functions::
 Functions that reduce the values of sources and/or unreduced mapping functions for every Solr Document to a single value.
-The provided reduction functions are detailed in the <<analytics-reduction-functions.adoc#,Analytics Reduction Function Reference>>.
+The provided reduction functions are detailed in the xref:analytics-reduction-functions.adoc[].
 
 ==== Component Ordering
 
@@ -212,25 +212,25 @@ With the above definitions and ordering, an example expression can be broken up
 div(sum(a,fill_missing(b,0)),add(10.5,count(mult(a,c)))))
 
 As a whole, this is a reduced mapping function.
-The `div` function is a reduced mapping function since it is a <<analytics-mapping-functions.adoc#division,provided mapping function>> and has reduced arguments.
+The `div` function is a reduced mapping function since it is a xref:analytics-mapping-functions.adoc#division[provided mapping function] and has reduced arguments.
 
 If we break down the expression further:
 
 * `sum(a,fill_missing(b,0))`: Reduction Function +
-`sum` is a <<analytics-reduction-functions.adoc#sum,provided reduction function>>.
+`sum` is a xref:analytics-reduction-functions.adoc#sum[provided reduction function].
 ** `a`: Field
 ** `fill_missing(b,0)`: Unreduced Mapping Function +
-`fill_missing` is an unreduced mapping function since it is a <<analytics-mapping-functions.adoc#fill-missing,provided mapping function>> and has a field argument.
+`fill_missing` is an unreduced mapping function since it is a xref:analytics-mapping-functions.adoc#fill-missing[provided mapping function] and has a field argument.
 *** `b`: Field
 *** `0`: Constant
 
 * `add(10.5,count(mult(a,c)))`: Reduced Mapping Function +
-`add` is a reduced mapping function since it is a <<analytics-mapping-functions.adoc#addition,provided mapping function>> and has a reduction function argument.
+`add` is a reduced mapping function since it is a xref:analytics-mapping-functions.adoc#addition[provided mapping function] and has a reduction function argument.
 ** `10.5`: Constant
 ** `count(mult(a,c))`: Reduction Function +
-`count` is a  <<analytics-reduction-functions.adoc#count,provided reduction function>>
+`count` is a xref:analytics-reduction-functions.adoc#count[provided reduction function].
 *** `mult(a,c)`: Unreduced Mapping Function +
-`mult` is an unreduced mapping function since it is a <<analytics-mapping-functions.adoc#multiplication,provided mapping function>> and has two field arguments.
+`mult` is an unreduced mapping function since it is a xref:analytics-mapping-functions.adoc#multiplication[provided mapping function] and has two field arguments.
 **** `a`: Field
 **** `c`: Field
 
@@ -771,7 +771,7 @@ A <<Facet Sorting,sort>> for the results of the pivot.
 
 Range Facets are used to group documents by the value of a field into a given set of ranges.
 The inputs for analytics range facets are identical to those used for Solr range facets.
-Refer to the <<faceting.adoc#range-faceting,Range Facet documentation>> for additional questions regarding use.
+Refer to the section xref:faceting.adoc#range-faceting[Range Faceting] for more information regarding use.
 
 ==== Parameters
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc
index 27e8932..9cfbed4 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/block-join-query-parser.adoc
@@ -17,7 +17,7 @@
 // under the License
 
 There are two query parsers that support block joins.
-These parsers allow indexing and searching for relational content that has been <<indexing-nested-documents.adoc#,indexed as Nested Documents>>.
+These parsers allow indexing and searching for relational content that has been xref:indexing-guide:indexing-nested-documents.adoc[indexed as Nested Documents].
 
 The example usage of the query parsers below assumes the following documents have been indexed:
 
@@ -99,7 +99,7 @@ This is equivalent to:
 q={!child of=<blockMask>}+<someParents> +BRAND:Foo +NAME:Bar
 
 Notice "$" syntax in `filters` for referencing queries; comma-separated tags `excludeTags` allows to exclude certain queries by tagging.
-Overall the idea is similar to <<faceting.adoc#tagging-and-excluding-filters, excluding fq in facets>>.
+Overall the idea is similar to xref:faceting.adoc#tagging-and-excluding-filters[excluding `fq` in facets].
 Note, that filtering is applied to the subordinate clause (`<someParents>`), and the intersection result is joined to the children.
 
 === All Children Syntax
@@ -162,7 +162,7 @@ q={!parent which=<blockMask>}+<someChildren> +COLOR:Red +SIZE:XL
 
 Notice the "$" syntax in `filters` for referencing queries.
 Comma-separated tags in `excludeTags` allow excluding certain queries by tagging.
-Overall the idea is similar to <<faceting.adoc#tagging-and-excluding-filters, excluding fq in facets>>.
+Overall the idea is similar to xref:faceting.adoc#tagging-and-excluding-filters[excluding `fq` in facets].
 Note that filtering is applied to the subordinate clause (`<someChildren>`) first, and the intersection result is joined to the parents.
 
 === Scoring with the Block Join Parent Query Parser
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc b/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc
index c103f02..bb55fd8 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/collapse-and-expand-results.adoc
@@ -19,7 +19,7 @@
 The Collapsing query parser and the Expand component combine to form an approach to grouping documents for field collapsing in search results.
 
 The Collapsing query parser groups documents (collapsing the result set) according to your parameters, while the Expand component provides access to documents in the collapsed group for use in results display or other processing by a client application.
-Collapse & Expand can together do what the older <<result-grouping.adoc#,Result Grouping>> (`group=true`) does for _most_ use-cases but not all.
+Collapse & Expand can together do what the older xref:result-grouping.adoc[] (`group=true`) does for _most_ use-cases but not all.
 Collapse and Expand are not supported when Result Grouping is enabled.
 Generally, you should prefer Collapse & Expand.
 
@@ -27,7 +27,7 @@ Generally, you should prefer Collapse & Expand.
 ====
 In order to use these features with SolrCloud, the documents must be located on the same shard.
 To ensure document co-location, you can define the `router.name` parameter as `compositeId` when creating the collection.
-For more information on this option, see the section <<solrcloud-shards-indexing.adoc#document-routing,Document Routing>>.
+For more information on this option, see the section xref:deployment-guide:solrcloud-shards-indexing.adoc#document-routing[Document Routing].
 ====
 
 == Collapsing Query Parser
@@ -59,7 +59,7 @@ The field must be a single-valued String, Int or Float-type of field.
 |Optional |Default: none
 |===
 +
-Selects the group head document for each group based on which document has the minimum or maximum value of the specified numeric field or <<function-queries.adoc#,function query>>.
+Selects the group head document for each group based on which document has the minimum or maximum value of the specified numeric field or xref:function-queries.adoc[function query].
 +
 At most only one of the `min`, `max`, or `sort` (see below) parameters may be specified.
 +
@@ -72,7 +72,7 @@ If none are specified, the group head document of each group will be selected ba
 |Optional |Default: none
 |===
 +
-Selects the group head document for each group based on which document comes first according to the specified <<common-query-parameters.adoc#sort-parameter,sort string>>.
+Selects the group head document for each group based on which document comes first according to the specified xref:common-query-parameters.adoc#sort-parameter[sort string].
 +
 At most only one of the `min`, `max`, (see above) or `sort` parameters may be specified.
 +
@@ -128,7 +128,7 @@ Setting the size above the number of results expected in the result set will eli
 |Optional |Default: `true`
 |===
 +
-In combination with the <<collapse-and-expand-results.adoc#collapsing-query-parser,Collapse Query Parser>> all elevated docs are visible at the beginning of the result set.
+In combination with the <<Collapsing Query Parser>> all elevated docs are visible at the beginning of the result set.
 If this parameter is `false`, only the representative is visible if the elevated docs has the same collapse key.
 
 
@@ -186,7 +186,7 @@ fq={!collapse cost=1000 field=group_field}
 
 === Block Collapsing
 
-When collapsing on the `\_root_` field, using `nullPolicy=expand` or `nullPolicy=ignore`, the Collapsing Query Parser can take advantage of the fact that all docs with identical field values are adjacent to each other in the index in a single <<indexing-nested-documents.adoc#,"block" of nested documents>>.
+When collapsing on the `\_root_` field, using `nullPolicy=expand` or `nullPolicy=ignore`, the Collapsing Query Parser can take advantage of the fact that all docs with identical field values are adjacent to each other in the index in a single xref:indexing-guide:indexing-nested-documents.adoc["block" of nested documents].
 This allows the collapsing logic to be much faster and more memory efficient.
 
 The default collapsing logic must keep track of all group head documents -- for all groups encountered so far -- until it has evaluated all documents, because each document it considers may become the new group head of any group.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc b/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc
index ee20b27..1576a09 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/common-query-parameters.adoc
@@ -27,7 +27,7 @@ For example:
 
 `defType=dismax`
 
-If no `defType` parameter is specified, then by default, the <<standard-query-parser.adoc#,Standard Query Parser>> is used.
+If no `defType` parameter is specified, then by default, the xref:standard-query-parser.adoc[] is used.
 (e.g., `defType=lucene`)
 
 == sort Parameter
@@ -39,16 +39,16 @@ The directions can be entered in either all lowercase or all uppercase letters (
 Solr can sort query responses according to:
 
 * Document scores
-* <<function-queries.adoc#sort-by-function,Function results>>
+* xref:function-queries.adoc#sort-by-function[Function results]
 * The value of any primitive field (numerics, string, boolean, dates, etc.) which has `docValues="true"` (or `multiValued="false"` and `indexed="true"`, in which case the indexed terms will used to build DocValue like structures on the fly at runtime)
 * A SortableTextField which implicitly uses `docValues="true"` by default to allow sorting on the original input string regardless of the analyzers used for Searching.
 * A single-valued TextField that uses an analyzer (such as the KeywordTokenizer) that produces only a single term per document.
 TextField does not support `docValues="true"`, but a DocValue-like structure will be built on the fly at runtime.
-** *NOTE:* If you want to be able to sort on a field whose contents you want to tokenize to facilitate searching, <<copy-fields.adoc#,use a `copyField` directive>> in the Schema to clone the field.
+** *NOTE:* If you want to be able to sort on a field whose contents you want to tokenize to facilitate searching, xref:indexing-guide:copy-fields.adoc[use a `copyField` directive] in the Schema to clone the field.
 Then search on the field and sort on its clone.
 
 In the case of primitive fields, or SortableTextFields, that are `multiValued="true"` the representative value used for each doc when sorting depends on the sort direction: The minimum value in each document is used for ascending (`asc`) sorting, while the maximal value in each document is used for descending (`desc`) sorting.
-This default behavior is equivalent to explicitly sorting using the 2 argument `<<function-queries.adoc#field-function,field()>>` function: `sort=field(name,min) asc` and `sort=field(name,max) desc`
+This default behavior is equivalent to explicitly sorting using the 2 argument xref:function-queries.adoc#field-function[`field()`] function: `sort=field(name,min) asc` and `sort=field(name,max) desc`
 
 The table below explains how Solr responds to various settings of the `sort` parameter.
 
@@ -98,7 +98,7 @@ That is, by default, Solr returns 10 documents at a time in response to a query.
 == canCancel Parameter
 
 This parameter defines if this query is cancellable during execution using the
-<<task-management.adoc#,task management>> interface.
+xref:deployment-guide:task-management.adoc[task management] interface.
 
 == queryUUID Parameter
 
@@ -139,8 +139,8 @@ fq=+popularity:[10 TO *] +section:0
 
 * The document sets from each filter query are cached independently.
 Thus, concerning the previous examples: use a single `fq` containing two mandatory clauses if those clauses appear together often, and use two separate `fq` parameters if they are relatively independent.
-(To learn about tuning cache sizes and making sure a filter cache actually exists, see <<caches-warming.adoc#caches,Caches>>.)
-* It is also possible to use <<standard-query-parser.adoc#differences-between-lucenes-classic-query-parser-and-solrs-standard-query-parser,filter(condition) syntax>> inside the `fq` to cache clauses individually and - among other things - to achieve union of cached filter queries.
+(To learn about tuning cache sizes and making sure a filter cache actually exists, see xref:configuration-guide:caches-warming.adoc#caches[Caches].)
+* It is also possible to use xref:standard-query-parser.adoc#differences-between-lucenes-classic-query-parser-and-solrs-standard-query-parser[filter(condition) syntax] inside the `fq` to cache clauses individually and - among other things - to achieve union of cached filter queries.
 
 * As with all parameters: special characters in an URL need to be properly escaped and encoded as hex values.
 Online tools are available to help you with URL-encoding.
@@ -148,8 +148,8 @@ For example: http://meyerweb.com/eric/tools/dencoder/.
 
 === cache Local Parameter
 
-Solr caches the results of filter queries by default in the <<caches-warming.adoc#filter-cache,filter cache>>.
-To disable it, use the boolean `cache` <<local-params.adoc#,local param>>, such as `fq={!geofilt cache=false}...`.
+Solr caches the results of filter queries by default in the xref:configuration-guide:caches-warming.adoc#filter-cache[filter cache].
+To disable it, use the boolean `cache` xref:local-params.adoc[local param], such as `fq={!geofilt cache=false}...`.
 Do this when you think a query is unlikely to be repeated.
 
 Non-cached filter queries also support the `cost` local parameter to provide a _hint_ as to the order in which they are evaluated.
@@ -204,7 +204,7 @@ This table shows some basic examples of how to use `fl`:
 
 === Functions with fl
 
-<<function-queries.adoc#,Functions>> can be computed for each document in the result and returned as a pseudo-field:
+xref:function-queries.adoc[] can be computed for each document in the result and returned as a pseudo-field:
 
 [source,text]
 ----
@@ -213,7 +213,7 @@ fl=id,title,product(price,popularity)
 
 === Document Transformers with fl
 
-<<document-transformers.adoc#,Document Transformers>> can be used to modify the information returned about each documents in the results of a query:
+xref:document-transformers.adoc[] can be used to modify the information returned about each documents in the results of a query:
 
 [source,text]
 ----
@@ -283,9 +283,9 @@ The default value of this parameter is blank, which causes no extra "explain inf
 == timeAllowed Parameter
 
 This parameter specifies the amount of time, in milliseconds, allowed for a search to complete.
-If this time expires before the search is complete, any partial results will be returned, but values such as `numFound`, <<faceting.adoc#,facet>> counts, and result <<stats-component.adoc#,stats>> may not be accurate for the entire result set.
+If this time expires before the search is complete, any partial results will be returned, but values such as `numFound`, xref:faceting.adoc[facet] counts, and result xref:stats-component.adoc[stats] may not be accurate for the entire result set.
 In case of expiration, if `omitHeader` isn't set to `true` the response header contains a special flag called `partialResults`.
-When using `timeAllowed` in combination with <<pagination-of-results.adoc#using-cursors,`cursorMark`>>, and the `partialResults` flag is present, some matching documents may have been skipped in the result set.
+When using `timeAllowed` in combination with xref:pagination-of-results.adoc#using-cursors[`cursorMark`], and the `partialResults` flag is present, some matching documents may have been skipped in the result set.
 Additionally, if  the `partialResults` flag is present, `cursorMark` can match `nextCursorMark` even if there may be more results
 
 [source,json]
@@ -322,11 +322,11 @@ Regular search, JSON Facet and the Analytics component abandon requests in accor
 
 This parameter may be set to either `true` or `false`.
 
-If set to `true`, and if <<index-segments-merging.adoc#mergepolicyfactory,the mergePolicyFactory>> for this collection is a {solr-javadocs}/core/org/apache/solr/index/SortingMergePolicyFactory.html[`SortingMergePolicyFactory`] which uses a `sort` option compatible with <<sort Parameter,the sort parameter>> specified for this query, then Solr will be able to skip documents on a per-segment basis that are definitively not candidates for the current page of results.
+If set to `true`, and if xref:configuration-guide:index-segments-merging.adoc#mergepolicyfactory[the mergePolicyFactory] for this collection is a {solr-javadocs}/core/org/apache/solr/index/SortingMergePolicyFactory.html[`SortingMergePolicyFactory`] which uses a `sort` option compatible with <<sort Parameter,the sort parameter>> specified for this query, then Solr will be able to skip documents on a per-segment basis that are definitively not candidates for the current page of results.
 
 If early termination is used, a `segmentTerminatedEarly` header will be included in the `responseHeader`.
 
-Similar to using <<timeAllowed Parameter,the `timeAllowed` Parameter>>, when early segment termination happens values such as `numFound`, <<faceting.adoc#,Facet>> counts, and result <<stats-component.adoc#,Stats>> may not be accurate for the entire result set.
+Similar to using <<timeAllowed Parameter,the `timeAllowed` Parameter>>, when early segment termination happens values such as `numFound`, xref:faceting.adoc[Facet] counts, and result xref:stats-component.adoc[Stats] may not be accurate for the entire result set.
 
 The default value of this parameter is `false`.
 
@@ -337,12 +337,12 @@ This parameter may be set to either `true` or `false`.
 If set to `true`, this parameter excludes the header from the returned results.
 The header contains information about the request, such as the time it took to complete.
 The default value for this parameter is `false`.
-When using parameters such as <<common-query-parameters.adoc#timeallowed-parameter,`timeAllowed`>>, and <<solrcloud-distributed-requests.adoc#shards-tolerant-parameter,`shards.tolerant`>>, which can lead to partial results, it is advisable to keep the header, so that the `partialResults` flag can be checked, and values such as `numFound`, `nextCursorMark`, <<faceting.adoc#,Facet>> counts, and result <<stats-component.adoc#,Stats>> can be interpreted in the context of partial results.
+When using parameters such as <<timeallowed-parameter,`timeAllowed`>>, and xref:deployment-guide:solrcloud-distributed-requests.adoc#shards-tolerant-parameter[`shards.tolerant`], which can lead to partial results, it is advisable to keep the header, so that the `partialResults` flag can be checked, and values such as `numFound`, `nextCursorMark`, xref:faceting.adoc[Facet] counts, and result xref:stats-component.adoc[Stats] can be interpreted in the context of partial results.
 
 == wt Parameter
 
 The `wt` parameter selects the Response Writer that Solr should use to format the query's response.
-For detailed descriptions of Response Writers, see <<response-writers.adoc#,Response Writers>>.
+For detailed descriptions of Response Writers, see xref:response-writers.adoc[].
 
 If you do not define the `wt` parameter in your queries, JSON will be returned as the format of the response.
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc
index c164898..b0c03af 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/dismax-query-parser.adoc
@@ -182,14 +182,14 @@ The overall impact a particular `bq` parameter will have on a given document can
 
 "Multiplicative Boosting" is generally considered to be a more predictable method of influencing document score, because it acts as a "scaling factor" -- increasing (or decreasing) the scores of each document by a _relative_ amount.
 
-The <<other-parsers.adoc#boost-query-parser,`{!boost}` QParser>> provides a convenient wrapper for implementing multiplicative boosting, and the <<edismax-query-parser.adoc#extended-dismax-parameters,`{!edismax}` QParser>> offers a `boost` query parameter shortcut for using it.
+The xref:other-parsers.adoc#boost-query-parser[`{!boost}` QParser] provides a convenient wrapper for implementing multiplicative boosting, and the xref:edismax-query-parser.adoc#extended-dismax-parameters[`{!edismax}` QParser] offers a `boost` query parameter shortcut for using it.
 ====
 
 
 === bf (Boost Functions) Parameter
 
-The `bf` parameter specifies functions (with optional <<standard-query-parser.adoc#boosting-a-term-with,query boost>>) that will be used to construct FunctionQueries which will be _added_ to the user's main query as optional clauses that will influence the score.
-Any <<function-queries.adoc#available-functions,function supported natively by Solr>> can be used, along with a boost value.
+The `bf` parameter specifies functions (with optional xref:standard-query-parser.adoc#boosting-a-term-with[query boost]) that will be used to construct FunctionQueries which will be _added_ to the user's main query as optional clauses that will influence the score.
+Any xref:function-queries.adoc#available-functions[function] supported natively by Solr can be used, along with a boost value.
 For example:
 
 [source,text]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc b/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc
index 8aa005d..e4ab4b4 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/document-transformers.adoc
@@ -126,10 +126,10 @@ A default style can be configured by specifying an `args` parameter in your `sol
 === [child] - ChildDocTransformerFactory
 
 
-This transformer returns all <<indexing-nested-documents.adoc#,descendant documents>> of each parent document matching your query.
+This transformer returns all xref:indexing-guide:indexing-nested-documents.adoc[descendant documents] of each parent document matching your query.
 This is useful when you have indexed nested child documents and want to retrieve the child documents for the relevant parent documents for any type of search query.
 
-Note that this transformer can be used even when the query used to match the result documents is not a <<block-join-query-parser.adoc#,Block Join query>>.
+Note that this transformer can be used even when the query used to match the result documents is not a xref:block-join-query-parser.adoc[Block Join query].
 
 
 [source,plain]
@@ -224,7 +224,7 @@ DocIdAugmenterFactory does not support any request parameters, or configuration
 
 === [elevated] and [excluded]
 
-These transformers are available only when using the <<query-elevation-component.adoc#,Query Elevation Component>>.
+These transformers are available only when using the xref:query-elevation-component.adoc[].
 
 * `[elevated]` annotates each document to indicate if it was elevated or not.
 * `[excluded]` annotates each document to indicate if it would have been excluded - this is only supported if you also use the `markExcludes` parameter.
@@ -389,7 +389,7 @@ Formats spatial data from a spatial field using a designated format type name.
 Two inner parameters are required: `f` for the field name, and `w` for the format name.
 Example: `geojson:[geo f=mySpatialField w=GeoJSON]`.
 
-Normally you'll simply be consistent in choosing the format type you want by setting the `format` attribute on the spatial field type to `WKT` or `GeoJSON` – see the section <<spatial-search.adoc#,Spatial Search>> for more information.
+Normally you'll simply be consistent in choosing the format type you want by setting the `format` attribute on the spatial field type to `WKT` or `GeoJSON` – see the section xref:spatial-search.adoc[] for more information.
 If you are consistent, it'll come out the way you stored it.
 This transformer offers a convenience to transform the spatial format to something different on retrieval.
 
@@ -401,7 +401,7 @@ In a sense this double-storage between docValues and stored-value storage isn't
 
 === [features] - LTRFeatureLoggerTransformerFactory
 
-The "LTR" prefix stands for <<learning-to-rank.adoc#,Learning To Rank>>.
+The "LTR" prefix stands for xref:learning-to-rank.adoc[].
 This transformer returns the values of features and it can be used for feature extraction and feature logging.
 
 [source,plain]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/edismax-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/edismax-query-parser.adoc
index 1eabda2..fd9a7ad 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/edismax-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/edismax-query-parser.adoc
@@ -16,11 +16,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The Extended DisMax (eDisMax) query parser is an improved version of the <<dismax-query-parser.adoc#,DisMax query parser>>.
+The Extended DisMax (eDisMax) query parser is an improved version of the xref:dismax-query-parser.adoc[].
 
 In addition to supporting all the DisMax query parser parameters, Extended DisMax:
 
-* supports <<standard-query-parser.adoc#,Solr's standard query parser>> syntax such as (non-exhaustive list):
+* supports xref:standard-query-parser.adoc[] syntax such as (non-exhaustive list):
 ** boolean operators such as AND (+, &&), OR (||), NOT (-).
 ** optionally treats lowercase "and" and "or" as "AND" and "OR" in Lucene syntax mode
 ** optionally allows embedded queries using other query parsers or functions
@@ -28,13 +28,13 @@ In addition to supporting all the DisMax query parser parameters, Extended DisMa
 * improves proximity boosting by using word shingles; you do not need the query to match all words in the document before proximity boosting is applied.
 * includes advanced stopword handling: stopwords are not required in the mandatory part of the query but are still used in the proximity boosting part.
 If a query consists of all stopwords, such as "to be or not to be", then all words are required.
-* includes improved boost function: in Extended DisMax, the `boost` function is a multiplier <<dismax-query-parser.adoc#bq-bf-shortcomings,rather than an addend>>, improving your boost results; the additive boost functions of DisMax (`bf` and `bq`) are also supported.
+* includes improved boost function: in Extended DisMax, the `boost` function is a multiplier xref:dismax-query-parser.adoc#bq-bf-shortcomings[rather than an addend], improving your boost results; the additive boost functions of DisMax (`bf` and `bq`) are also supported.
 * supports pure negative nested queries: queries such as `+foo (-foo)` will match all documents.
 * lets you specify which fields the end user is allowed to query, and to disallow direct fielded searches.
 
 == Extended DisMax Parameters
 
-In addition to all the <<dismax-query-parser.adoc#dismax-query-parser-parameters,DisMax parameters>>, Extended DisMax includes these query parameters:
+In addition to all the xref:dismax-query-parser.adoc#dismax-query-parser-parameters[DisMax parameters], Extended DisMax includes these query parameters:
 
 `sow`::
 Split on whitespace.
@@ -43,7 +43,7 @@ The default is `false`; whitespace-separated term sequences will be provided to
 
 `mm`::
 Minimum should match.
-See the <<dismax-query-parser.adoc#mm-minimum-should-match-parameter,DisMax mm parameter>> for a description of `mm`.
+See the xref:dismax-query-parser.adoc#mm-minimum-should-match-parameter[DisMax `mm` parameter] for a description of `mm`.
 The default eDisMax `mm` value differs from that of DisMax:
 +
 * The default `mm` value is 0%:
@@ -52,14 +52,14 @@ The default eDisMax `mm` value differs from that of DisMax:
 * The default `mm` value is 100% if `q.op` is "AND" and the query does not contain any explicit operators other than "AND".
 
 `mm.autoRelax`::
-If `true`, the number of clauses required (<<dismax-query-parser.adoc#mm-minimum-should-match-parameter,minimum should match>>) will automatically be relaxed if a clause is removed (by e.g., stopwords filter) from some but not all <<dismax-query-parser.adoc#qf-query-fields-parameter,`qf`>> fields.
+If `true`, the number of clauses required will automatically be relaxed if a clause is removed (by e.g., stopwords filter) from some but not all xref:dismax-query-parser.adoc#qf-query-fields-parameter[`qf`] fields.
 Use this parameter as a workaround if you experience that queries return zero hits due to uneven stopword removal between the `qf` fields.
 +
 Note that relaxing `mm` may cause undesired side effects, such as hurting the precision of the search, depending on the nature of your index content.
 
 `boost`::
-A multivalued list of strings parsed as <<function-queries.adoc#available-functions,functions>> whose results will be multiplied into the score from the main query for all matching documents.
-This parameter is shorthand for wrapping the query produced by eDisMax using the <<other-parsers.adoc#boost-query-parser,`BoostQParserPlugin`>>.
+A multivalued list of strings parsed as xref:function-queries.adoc#available-functions[functions] whose results will be multiplied into the score from the main query for all matching documents.
+This parameter is shorthand for wrapping the query produced by eDisMax using the xref:other-parsers.adoc#boost-query-parser[`BoostQParserPlugin`].
 
 These two examples are equivalent:
 [source,text]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/exporting-result-sets.adoc b/solr/solr-ref-guide/modules/query-guide/pages/exporting-result-sets.adoc
index 3c5e3a7..6719d37 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/exporting-result-sets.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/exporting-result-sets.adoc
@@ -16,7 +16,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The `/export` request handler allows a fully sorted result set to be streamed out of Solr using a special <<query-re-ranking.adoc#,rank query parser>> and <<response-writers.adoc#,response writer>>.
+The `/export` request handler allows a fully sorted result set to be streamed out of Solr using a special xref:query-re-ranking.adoc[rank query parser] and xref:response-writers.adoc[response writer].
 These have been specifically designed to work together to handle scenarios that involve sorting and exporting millions of records.
 
 This feature uses a stream sorting technique that begins to send records within milliseconds and continues to stream results until the entire result set has been sorted and exported.
@@ -26,11 +26,11 @@ The cases where this functionality may be useful include: session analysis, dist
 == Field Requirements
 
 All the fields being sorted and exported must have docValues set to `true`.
-For more information, see the section on <<docvalues.adoc#,DocValues>>.
+For more information, see the section on xref:indexing-guide:docvalues.adoc[].
 
 == The /export RequestHandler
 
-The `/export` request handler with the appropriate configuration is one of Solr's out-of-the-box request handlers - see <<implicit-requesthandlers.adoc#,Implicit Request Handlers>> for more information.
+The `/export` request handler with the appropriate configuration is one of Solr's out-of-the-box request handlers - see xref:configuration-guide:implicit-requesthandlers.adoc[] for more information.
 
 Note that this request handler's properties are defined as "invariants", which means they cannot be overridden by other properties passed at another time (such as at query time).
 
@@ -74,13 +74,13 @@ However, returning scores and wildcards are not supported at this time.
 
 === Specifying the Local Streaming Expression
 
-The optional `expr` property defines a <<streaming-expressions.adoc#,stream expression>> that allows documents to be processed locally before they are exported in the result set.
+The optional `expr` property defines a xref:streaming-expressions.adoc[stream expression] that allows documents to be processed locally before they are exported in the result set.
 
 Expressions have to use a special `input()` stream that represents original results from the `/export` handler.
 Output from the stream expression then becomes the output from the `/export` handler.
 The `&streamLocalOnly=true` flag is always set for this streaming expression.
 
-Only stream <<stream-decorator-reference.adoc#,decorators>> and <<stream-evaluator-reference.adoc#,evaluators>> are supported in these expressions - using any of the <<stream-source-reference.adoc#,source>> expressions except for the pre-defined `input()` will result in an error.
+Only stream xref:stream-decorator-reference.adoc[decorators] and xref:stream-evaluator-reference.adoc[evaluators] are supported in these expressions - using any of the xref:stream-source-reference.adoc[source] expressions except for the pre-defined `input()` will result in an error.
 
 Using stream expressions with the `/export` handler may result in dramatic performance improvements due to the local in-memory reduction of the number of documents to be returned.
 
@@ -104,4 +104,4 @@ http://localhost:8983/solr/core_name/export?q=my-query&sort=reporter+desc,&fl=re
 
 == Distributed Support
 
-See the section <<streaming-expressions.adoc#,Streaming Expressions>> for distributed support.
+See the section xref:streaming-expressions.adoc[] for distributed support.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc b/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc
index e4a15ff..fdfeac4 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/faceting.adoc
@@ -21,7 +21,7 @@ Faceting is the arrangement of search results into categories based on indexed t
 Searchers are presented with the indexed terms, along with numerical counts of how many matching documents were found for each term.
 Faceting makes it easy for users to explore search results, narrowing in on exactly the results they are looking for.
 
-See also <<json-facet-api.adoc#, JSON Facet API>> for an alternative approach to this.
+See also xref:json-facet-api.adoc[] for an alternative approach to this.
 
 == General Facet Parameters
 
@@ -68,7 +68,7 @@ For text fields that include stemming, lowercasing, or word splitting, the resul
 
 If you want Solr to perform both analysis (for searching) and faceting on the full literal strings, use the `copyField` directive in your Schema to create two versions of the field: one Text and one String.
 The Text field should have `indexed="true" docValues=“false"` if used for searching but not faceting and the String field should have `indexed="false" docValues="true"` if used for faceting but not searching.
-(For more information about the `copyField` directive, see <<fields-and-schema-design.adoc#,Fields and Schema Design>>.)
+(For more information about the `copyField` directive, see xref:indexing-guide:copy-fields.adoc[].)
 
 Unless otherwise specified, all of the parameters below can be specified on a per-field basis with the syntax of `f.<fieldname>.facet.<parameter>`
 
@@ -423,16 +423,16 @@ filter::: This method generates the ranges based on other facet.range parameters
 It will make use of the filterCache, so it will benefit of a cache large enough to contain all ranges.
 +
 dv::: This method iterates the documents that match the main query, and for each of them finds the correct range for the value.
-This method will make use of <<docvalues.adoc#,docValues>> (if enabled for the field) or fieldCache.
-The `dv` method is not supported for field type DateRangeField or when using <<result-grouping.adoc#,group.facets>>.
+This method will make use of xref:docvalues.adoc[] (if enabled for the field) or fieldCache.
+The `dv` method is not supported for field type DateRangeField or when using xref:result-grouping.adoc[group.facets].
 --
 
 .Date Ranges & Time Zones
 [NOTE]
 ====
-Range faceting on date fields is a common situation where the <<date-formatting-math.adoc#tz,`TZ`>> parameter can be useful to ensure that the "facet counts per day" or "facet counts per month" are based on a meaningful definition of when a given day/month "starts" relative to a particular TimeZone.
+Range faceting on date fields is a common situation where the xref:indexing-guide:date-formatting-math.adoc#tz[`TZ`] parameter can be useful to ensure that the "facet counts per day" or "facet counts per month" are based on a meaningful definition of when a given day/month "starts" relative to a particular TimeZone.
 
-For more information, see the examples in the <<date-formatting-math.adoc#,Working with Dates>> section.
+For more information, see the examples in the section xref:indexing-guide:date-formatting-math.adoc[].
 ====
 
 === facet.mincount in Range Faceting
@@ -509,7 +509,7 @@ http://localhost:8983/solr/techproducts/select?q=*:*&facet.pivot=cat,popularity,
 
 === Combining Stats Component With Pivots
 
-In addition to some of the <<Local Params for Faceting,general local params>> supported by other types of faceting, a `stats` local params can be used with `facet.pivot` to refer to <<stats-component.adoc#,`stats.field`>> instances (by tag) that you would like to have computed for each Pivot Constraint.
+In addition to some of the <<Local Params for Faceting,general local params>> supported by other types of faceting, a `stats` local params can be used with `facet.pivot` to refer to xref:stats-component.adoc[`stats.field`] instances (by tag) that you would like to have computed for each Pivot Constraint.
 
 In the example below, two different (overlapping) sets of statistics are computed for each of the facet.pivot result hierarchies:
 
@@ -744,7 +744,7 @@ Even though the same functionality can be achieved by using a facet query with r
 If you are concerned about the performance of your searches you should test with both options.
 Interval faceting tends to be better with multiple intervals for the same fields, while facet query tend to be better in environments where filter cache is more effective (static indexes for example).
 
-This method will use <<docvalues.adoc#,docValues>> if they are enabled for the field, will use fieldCache otherwise.
+This method will use xref:indexing-guide:docvalues.adoc[] if they are enabled for the field, will use fieldCache otherwise.
 
 Use these parameters for interval faceting:
 
@@ -817,7 +817,7 @@ For example:
 
 == Local Params for Faceting
 
-The <<local-params.adoc#,LocalParams syntax>> allows overriding global settings.
+The xref:local-params.adoc[LocalParams syntax] allows overriding global settings.
 It can also provide a method of adding metadata to other parameter values, much like XML attributes.
 
 === Tagging and Excluding Filters
@@ -875,6 +875,6 @@ Note: other parameters might not be fully supported when this parameter is suppl
 
 == Related Topics
 
-See <<spatial-search.adoc#,Spatial Search>> for examples of faceting by distance and generating heatmaps via faceting.
+See xref:spatial-search.adoc[] for examples of faceting by distance and generating heatmaps via faceting.
 
-See <<response-writers.adoc#json-nl, Response Writers>> for details on the `json.nl` parameter for controlling the format for writing out field facet data when using the JSON response writer.
+See xref:response-writers.adoc#json-nl[json.nl] for details on the `json.nl` parameter for controlling the format for writing out field facet data when using the JSON response writer.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc b/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc
index f827253..d679525 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/function-queries.adoc
@@ -18,7 +18,7 @@
 
 Function queries enable you to generate a relevancy score using the actual value of one or more numeric fields.
 
-Function queries are supported by the <<dismax-query-parser.adoc#,DisMax>>, <<edismax-query-parser.adoc#,Extended DisMax>>, and <<standard-query-parser.adoc#,standard>> query parsers.
+Function queries are supported by the xref:dismax-query-parser.adoc[], xref:edismax-query-parser.adoc[], and xref:standard-query-parser.adoc[].
 
 Function queries use _functions_.
 The functions can be a constant (numeric or string literal), a field, another function or a parameter substitution argument.
@@ -31,7 +31,7 @@ Functions must be expressed as function calls (for example, `sum(a,b)` instead o
 
 There are several ways of using function queries in a Solr query:
 
-* Via an explicit query parser that expects function arguments, such <<other-parsers.adoc#function-query-parser,`func`>> or <<other-parsers.adoc#function-range-query-parser,`frange`>>.
+* Via an explicit query parser that expects function arguments, such as xref:other-parsers.adoc#function-query-parser[`func`] or xref:other-parsers.adoc#function-range-query-parser[`frange`].
 For example:
 +
 [source,text]
@@ -63,7 +63,8 @@ the output would be:
 <float name="score">0.343</float>
 ...
 ----
-* Use in a parameter that is explicitly for specifying functions, such as the eDisMax query parser's <<edismax-query-parser.adoc#extended-dismax-parameters,`boost` parameter>>, or the DisMax query parser's <<dismax-query-parser.adoc#bf-boost-functions-parameter,`bf` (boost function) parameter>>. (Note that the `bf` parameter actually takes a list of function queries separated by white space and each with an optional boost.
+* Use in a parameter that is explicitly for specifying functions, such as the eDisMax query parser's xref:edismax-query-parser.adoc#extended-dismax-parameters[`boost` parameter], or the DisMax query parser's xref:dismax-query-parser.adoc#bf-boost-functions-parameter[`bf` (boost function) parameter].
+(Note that the `bf` parameter actually takes a list of function queries separated by white space and each with an optional boost.
 Make sure you eliminate any internal white space in single function queries when using `bf`).
 For example:
 +
@@ -94,7 +95,7 @@ Returns the absolute value of the specified value or function.
 * `abs(-5)`
 
 === childfield(field) Function
-Returns the value of the given field for one of the matched child docs when searching by <<block-join-query-parser.adoc#block-join-parent-query-parser,{!parent}>>.
+Returns the value of the given field for one of the matched child docs when searching by xref:block-join-query-parser.adoc#block-join-parent-query-parser[`{!parent}`].
 It can be used only in `sort` parameter.
 
 *Syntax Examples*
@@ -284,7 +285,7 @@ Use the `field(myfield,min)` <<field Function,syntax for selecting the minimum v
 Returns milliseconds of difference between its arguments.
 Dates are relative to the Unix or POSIX time epoch, midnight, January 1, 1970 UTC.
 
-Arguments may be the name of a `DatePointField`, `TrieDateField`, or date math based on a <<date-formatting-math.adoc#,constant date or `NOW`>>.
+Arguments may be the name of a `DatePointField`, `TrieDateField`, or date math based on a xref:date-formatting-math.adoc[constant date or `NOW`].
 
 * `ms()`: Equivalent to `ms(NOW)`, number of milliseconds since the epoch.
 * `ms(a):` Returns the number of milliseconds since the epoch that the argument represents.
@@ -373,7 +374,7 @@ Returns the product of multiple values or functions, which are specified in a co
 
 === query Function
 Returns the score for the given subquery, or the default value for documents not matching the query.
-Any type of subquery is supported through either parameter de-referencing `$otherparam` or direct specification of the query string in the <<local-params.adoc#,local params>> through the `v` key.
+Any type of subquery is supported through either parameter de-referencing `$otherparam` or direct specification of the query string in the xref:local-params.adoc[] through the `v` key.
 
 *Syntax Examples*
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/graph-traversal.adoc b/solr/solr-ref-guide/modules/query-guide/pages/graph-traversal.adoc
index f608bec..1b79156 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/graph-traversal.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/graph-traversal.adoc
@@ -31,7 +31,7 @@ Some sample use cases are provided later in the document.
 ====
 This document assumes a basic understanding of graph terminology and streaming expressions.
 You can begin exploring graph traversal concepts with this https://en.wikipedia.org/wiki/Graph_traversal[Wikipedia article].
-More details about streaming expressions are available in this Guide, in the section <<streaming-expressions.adoc#,Streaming Expressions>>.
+More details about streaming expressions are available in this Guide, in the section xref:streaming-expressions.adoc[].
 ====
 
 == Basic Syntax
@@ -68,7 +68,7 @@ nodes(emails,
 
 The `nodes` function above finds all the edges with "johndoe@apache.org" or "janesmith@apache.org" in the `from` field and gathers the `to` field.
 
-Like all <<streaming-expressions.adoc#,Streaming Expressions>>, you can execute a `nodes` expression by sending it to the `/stream` handler.
+Like all xref:streaming-expressions.adoc[], you can execute a `nodes` expression by sending it to the `/stream` handler.
 For example:
 
 [source,bash]
@@ -260,7 +260,7 @@ nodes(emails,
 
 In the example above only emails that match the filter query will be included in the traversal.
 Any Solr query can be included here.
-So you can do fun things like <<spatial-search.adoc#,geospatial queries>>, apply any of the available <<query-syntax-and-parsers.adoc#,query parsers>>, or even write custom query parsers to limit the traversal.
+So you can do fun things like xref:spatial-search.adoc[geospatial queries], apply any of the available xref:query-syntax-and-parsers.adoc[query parsers], or even write custom query parsers to limit the traversal.
 
 == Root Streams
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/graph.adoc b/solr/solr-ref-guide/modules/query-guide/pages/graph.adoc
index 09845c1..52ca334 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/graph.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/graph.adoc
@@ -397,7 +397,7 @@ In order to support temporal graph queries a ten second truncated timestamp in *
 Here is a sample ten second truncated timestamp: `2021-02-10T20:51:30Z`.
 This small data change enables some very important use cases so it's well worth the effort.
 
-Solr's indexing tool for Solr logs, described <<logs.adoc#,here>>, already adds the ten second truncated timestamps.
+Solr's indexing tool for Solr logs, described xref:logs.adoc[here], already adds the ten second truncated timestamps.
 So those using Solr to analyze Solr logs get temporal graph expressions for free.
 
 === Root Events
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/highlighting.adoc b/solr/solr-ref-guide/modules/query-guide/pages/highlighting.adoc
index 87bf571..14ba5cb 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/highlighting.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/highlighting.adoc
@@ -69,7 +69,7 @@ When using `*`, consider adding `hl.requireFieldMatch=true`.
 Note that the field(s) listed here ought to have compatible text-analysis (defined in the schema) with field(s) referenced in the query to be highlighted.
 It may be necessary to modify `hl.q` and `hl.qparser` and/or modify the text analysis.
 +
-The following example uses the <<local-params.adoc#,local params>> syntax and <<edismax-query-parser.adoc#,the eDisMax parser>> to highlight fields in `hl.fl`:
+The following example uses the xref:local-params.adoc[local params] syntax and xref:edismax-query-parser.adoc[] to highlight fields in `hl.fl`:
 +
 [source,text]
 &hl.fl=field1 field2&hl.q={!edismax qf=$hl.fl v=$q}&hl.qparser=lucene&hl.requireFieldMatch=true
@@ -96,7 +96,7 @@ The default is the value of the `q` parameter (already parsed).
 |Optional |Default: _see description_
 |===
 +
-The <<query-syntax-and-parsers.adoc#,query parser>> to use for the `hl.q` query.
+The xref:query-syntax-and-parsers.adoc[query parser] to use for the `hl.q` query.
 It only applies when `hl.q` is set.
 +
 The default is the value of the `defType` parameter which in turn defaults to `lucene`.
@@ -247,7 +247,7 @@ When there is a match to the query term in that field, it will be included for e
 
 == Choosing a Highlighter
 
-Solr provides a `HighlightComponent` (a <<requesthandlers-searchcomponents.adoc#defining-search-components,`SearchComponent`>>) and it's in the default list of components for search handlers.
+Solr provides a `HighlightComponent` (a xref:configuration-guide:requesthandlers-searchcomponents.adoc#defining-search-components[`SearchComponent`]) and it's in the default list of components for search handlers.
 It offers a somewhat unified API over multiple actual highlighting implementations (or simply "highlighters") that do the business of highlighting.
 
 There are many parameters supported by more than one highlighter, and sometimes the implementation details and semantics will be a bit different, so don't expect identical results when switching highlighters.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/jdbc-zeppelin.adoc b/solr/solr-ref-guide/modules/query-guide/pages/jdbc-zeppelin.adoc
index d7a7d1d..5299f0f 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/jdbc-zeppelin.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/jdbc-zeppelin.adoc
@@ -21,7 +21,7 @@ The Solr JDBC driver can support http://zeppelin.apache.org/[Apache Zeppelin].
 [NOTE]
 ====
 A Solr-specific interpreter is available as an alternative to using the JDBC driver.
-See the section <<math-start.adoc#zeppelin-solr-interpreter,Zeppelin-Solr Interpreter>> for details.
+See the section xref:math-start.adoc#zeppelin-solr-interpreter[Zeppelin-Solr Interpreter] for details.
 
 If you choose to use Zeppelin's JDBC interpreter instead, you must use Zeppelin 0.6.0 or greater to get JDBC support.
 ====
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/join-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/join-query-parser.adoc
index 11e30aa..683d8b9 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/join-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/join-query-parser.adoc
@@ -196,7 +196,7 @@ As the join keys are streamed to the node, a bitset of the matching documents in
 This avoids keeping the full set of join keys in memory at any given time.
 This bitset is then inserted into the filter cache upon successful execution as with the normal behavior of the Solr filter cache.
 
-If the local index is sharded according to the join key field, the cross collection join can leverage a secondary query parser called the <<other-parsers.adoc#hash-range-query-parser,Hash Range Query Parser>>.
+If the local index is sharded according to the join key field, the cross collection join can leverage a secondary query parser called the xref:other-parsers.adoc#hash-range-query-parser[Hash Range Query Parser].
 The hash range query parser is responsible for returning only the documents that hash to a given range of values.
 This allows the `CrossCollectionQuery` to query the remote Solr collection and return only the join keys that would match a specific shard in the local Solr collection.
 This has the benefit of making sure that network traffic doesn't increase as the number of shards increases and allows for much greater scalability.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/json-facet-api.adoc b/solr/solr-ref-guide/modules/query-guide/pages/json-facet-api.adoc
index 2adbef2..7ab160f 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/json-facet-api.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/json-facet-api.adoc
@@ -195,7 +195,7 @@ include::example$JsonRequestApiTest.java[tag=solrj-json-terms-facet-2]
 |`limit` |Limits the number of buckets returned. Defaults to 10.
 |`sort` |Specifies how to sort the buckets produced.
 
-`count` specifies document count, `index` sorts by the index (natural) order of the bucket value. One can also sort by any <<json-facet-api.adoc#stat-facet-functions,facet function / statistic>> that occurs in the bucket. The default is `count desc`. This parameter may also be specified in JSON like `sort:{count:desc}`. The sort order may either be “asc” or “desc”
+`count` specifies document count, `index` sorts by the index (natural) order of the bucket value. One can also sort by any <<stat-facet-functions,facet function / statistic>> that occurs in the bucket. The default is `count desc`. This parameter may also be specified in JSON like `sort:{count:desc}`. The sort order may be either “asc” or “desc”.
 |`overrequest` a|
 Number of buckets beyond the `limit` to internally request from shards during a distributed search.
 
@@ -225,7 +225,7 @@ This parameter indicates the facet algorithm to use:
 * `stream` Presently equivalent to `enum`. Used for indexed, non-point fields with sort `index asc` and `allBuckets`, `numBuckets`, and `missing` disabled.
 * `smart` Pick the best method for the field type (this is the default)
 
-|`prelim_sort` |An optional parameter for specifying an approximation of the final `sort` to use during initial collection of top buckets when the <<json-facet-api.adoc#sorting-facets-by-nested-functions,`sort` parameter is very costly>>.
+|`prelim_sort` |An optional parameter for specifying an approximation of the final `sort` to use during initial collection of top buckets when the <<sorting-facets-by-nested-functions,`sort` parameter is very costly>>.
 |===
 
 === Query Facet
@@ -504,7 +504,7 @@ Currently, custom `key` is not supported.
 
 The `heatmap` facet generates a 2D grid of facet counts for documents having spatial data in each grid cell.
 
-This feature is primarily documented in the <<spatial-search.adoc#heatmap-faceting,spatial>> section of the reference guide.
+This feature is primarily documented in the xref:spatial-search.adoc#heatmap-faceting[spatial] section of the reference guide.
 The key parameters are `type` to specify `heatmap` and `field` to indicate a spatial RPT field.
 The rest of the parameter names use the same names and semantics mirroring
   facet.heatmap query-parameter style faceting, albeit without the "facet.heatmap." prefix.
@@ -579,17 +579,17 @@ Instead, they calculate something over all the documents in the domain.
 |missing |`missing(author)` |number of documents which do not have value for given field or function
 |countvals |`countvals(author)` |number of values for a given field or function
 |unique |`unique(author)` |number of unique values of the given field. Beyond 100 values it yields not exact estimate
-|uniqueBlock |`uniqueBlock(\_root_)` or `uniqueBlock($fldref)` where `fldref=\_root_` |same as above with smaller footprint strictly for <<json-faceting-domain-changes.adoc#block-join-domain-changes,counting the number of Block Join blocks>>. The given field must be unique across blocks, and only singlevalued string fields are supported, docValues are recommended.
+|uniqueBlock |`uniqueBlock(\_root_)` or `uniqueBlock($fldref)` where `fldref=\_root_` |same as above with smaller footprint strictly for xref:json-faceting-domain-changes.adoc#block-join-domain-changes[counting the number of Block Join blocks]. The given field must be unique across blocks, and only singlevalued string fields are supported, docValues are recommended.
 | |`uniqueBlock({!v=type:parent})` or `uniqueBlock({!v=$qryref})` where `qryref=type:parent` |same as above, but using bitset of the given query to aggregate hits.
 |hll |`hll(author)` |distributed cardinality estimate via hyper-log-log algorithm
 |percentile |`percentile(salary,50,75,99,99.9)` |Percentile estimates via t-digest algorithm. When sorting by this metric, the first percentile listed is used as the sort value.
 |sumsq |`sumsq(rent)` |sum of squares of field or function
 |variance |`variance(rent)` |variance of numeric field or function
 |stddev |`stddev(rent)` |standard deviation of field or function
-|relatedness |`relatedness('popularity:[100 TO *]','inStock:true')`|A function for computing a relatedness score of the documents in the domain to a Foreground set, relative to a Background set (both defined as queries). This is primarily for use when building <<json-facet-api.adoc#relatedness-and-semantic-knowledge-graphs,Semantic Knowledge Graphs>>.
+|relatedness |`relatedness('popularity:[100 TO *]','inStock:true')`|A function for computing a relatedness score of the documents in the domain to a Foreground set, relative to a Background set (both defined as queries). This is primarily for use when building <<relatedness-and-semantic-knowledge-graphs,Semantic Knowledge Graphs>>.
 |===
 
-Numeric aggregation functions such as `avg` can be on any numeric field, or on a <<function-queries.adoc#,nested function>> of multiple numeric fields such as `avg(div(popularity,price))`.
+Numeric aggregation functions such as `avg` can be on any numeric field, or on a xref:function-queries.adoc[nested function] of multiple numeric fields such as `avg(div(popularity,price))`.
 
 The most common way of requesting an aggregation function is as a simple String containing the expression you wish to compute:
 
@@ -625,8 +625,8 @@ include::example$JsonRequestApiTest.java[tag=solrj-json-metrics-facet-simple]
 ====
 --
 
-An expanded form allows for <<local-params.adoc#,local params>> to be specified.
-These may be used explicitly by some specialized aggregations such as `<<json-facet-api.adoc#relatedness-options,relatedness()>>`, but can also be used as parameter references to make aggregation expressions more readable, with out needing to use (global) request parameters:
+An expanded form allows for xref:local-params.adoc[] to be specified.
+These may be used explicitly by some specialized aggregations such as <<relatedness-options,`relatedness()`>>, but can also be used as parameter references to make aggregation expressions more readable, without needing to use (global) request parameters:
 
 [.dynamic-tabs]
 --
@@ -852,7 +852,7 @@ As discussed above, facets compute buckets or statistics based on their "domain"
 
 In addition to this default behavior, domains can be also be widened, narrowed, or changed entirely.
 The JSON Faceting API supports modifying domains through its `domain` property.
-This is discussed in more detail <<json-faceting-domain-changes.adoc#,here>>
+This is discussed in more detail in xref:json-faceting-domain-changes.adoc[].
 
 == Special Stat Facet Functions
 
@@ -862,7 +862,7 @@ These are described in more detail in the sections below.
 
 === uniqueBlock() and Block Join Counts
 
-When a collection contains <<indexing-nested-documents.adoc#, Nested Documents>>, the `blockChildren` and `blockParent` <<json-faceting-domain-changes.adoc#block-join-domain-changes, domain changes>> can be useful when searching for parent documents and you want to compute stats against all of the affected children documents (or vice versa).
+When a collection contains xref:indexing-guide:indexing-nested-documents.adoc[nested documents], the `blockChildren` and `blockParent` xref:json-faceting-domain-changes.adoc#block-join-domain-changes[domain changes] can be useful when searching for parent documents and you want to compute stats against all of the affected children documents (or vice versa).
 But if you only need to know the _count_ of all the blocks that exist in the current domain, a more efficient option is the `uniqueBlock()` aggregate function.
 
 Suppose we have products with multiple SKUs, and we want to count products for each color.
@@ -1023,7 +1023,7 @@ curl -sS -X POST http://localhost:8983/solr/gettingstarted/query -d 'rows=0&q=*:
 <1> Use the entire collection as our "Background Set"
 <2> Use a query for "age >= 35" to define our (initial) "Foreground Set"
 <3> For both the top level `hobbies` facet & the sub-facet on `state` we will be sorting on the `relatedness(...)` values
-<4> In both calls to the `relatedness(...)` function, we use <<local-params.adoc#parameter-dereferencing,parameter variables>> to refer to the previously defined `fore` and `back` queries.
+<4> In both calls to the `relatedness(...)` function, we use xref:local-params.adoc#parameter-dereferencing[parameter variables] to refer to the previously defined `fore` and `back` queries.
 
 .The Facet Response
 [source,json,subs="verbatim,callouts"]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc b/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc
index 9226373..d7e5049 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/json-faceting-domain-changes.adoc
@@ -75,7 +75,7 @@ Each query can be:
 
 * a string containing a query in Solr query syntax.
 * a reference to a request parameter containing Solr query syntax, of the form: `{param: <request_param_name>}`.
-It's possible to refer to one or multiple queries in DSL syntax defined under <<json-query-dsl.adoc#additional-queries,queries>> key in JSON Request API.
+It's possible to refer to one or multiple queries in DSL syntax defined under xref:json-query-dsl.adoc#additional-queries[queries] key in JSON Request API.
 The referred parameter might have 0 (absent) or many values.
 ** When no values are specified, no filter is applied and no error is thrown.
 ** When many values are specified, each value is parsed and used as filters in conjunction.
@@ -165,13 +165,13 @@ The value of `excludeTags` can be a single string tag, an array of string tags,
 
 When an `excludeTags` option is combined with other `domain` changing options, it expands the domain _before_ any other domain changes take place.
 
-See also the section on <<faceting.adoc#tagging-and-excluding-filters,multi-select faceting>>.
+See also the section on xref:faceting.adoc#tagging-and-excluding-filters[multi-select faceting].
 
 == Arbitrary Domain Query
 
 A `query` domain can be specified when you wish to compute a facet against an arbitrary set of documents, regardless of the original domain.
 The most common use case would be to compute a top level facet against a specific subset of the collection, regardless of the main query.
-But it can also be useful on nested facets when building <<json-facet-api.adoc#relatedness-and-semantic-knowledge-graphs,Semantic Knowledge Graphs>>.
+But it can also be useful on nested facets when building xref:json-facet-api.adoc#relatedness-and-semantic-knowledge-graphs[Semantic Knowledge Graphs].
 
 Example:
 
@@ -221,9 +221,9 @@ NOTE: While a `query` domain can be combined with an additional domain `filter`,
 
 == Block Join Domain Changes
 
-When a collection contains <<indexing-nested-documents.adoc#, Nested Documents>>, the `blockChildren` or `blockParent` domain options can be used to transform an existing domain containing one type of document, into a domain containing the documents with the specified relationship (child or parent of) to the documents from the original domain.
+When a collection contains xref:indexing-guide:indexing-nested-documents.adoc[nested documents], the `blockChildren` or `blockParent` domain options can be used to transform an existing domain containing one type of document, into a domain containing the documents with the specified relationship (child or parent of) to the documents from the original domain.
 
-Both of these options work similarly to the corresponding <<block-join-query-parser.adoc#,Block Join Query Parsers>> by taking in a single String query that exclusively matches all parent documents in the collection.
+Both of these options work similarly to the corresponding xref:block-join-query-parser.adoc[] by taking in a single String query that exclusively matches all parent documents in the collection.
 If `blockParent` is used, then the resulting domain will contain all parent documents of the children from the original domain.
 If `blockChildren` is used, then the resulting domain will contain all child documents of the parents from the original domain.
 Quite often facets over child documents needs to be counted in parent documents, this can be done by `uniqueBlock(\_root_)` as described in <<json-facet-api#uniqueblock-and-block-join-counts, Block Join Facet Counts>>.
@@ -252,7 +252,7 @@ The resulting `brands` sub-facet will count how many Product documents (that hav
 
 A `join` domain change option can be used to specify arbitrary `from` and `to` fields to use in transforming from the existing domain to a related set of documents.
 
-This works similarly to the <<join-query-parser.adoc#,Join Query Parser>>, and has the same limitations when dealing with multi-shard collections.
+This works similarly to the xref:join-query-parser.adoc[], and has the same limitations when dealing with multi-shard collections.
 
 Example:
 [source,json]
@@ -281,13 +281,13 @@ Example:
 
 `join` domain changes support an optional `method` parameter, which allows users to specify which join implementation they would like to use in this domain transform.
 Solr offers several join implementations, each with different performance characteristics.
-For more information on these implementations and their tradeoffs, see the `method` parameter documentation <<join-query-parser.adoc#parameters,here>>.  Join domain changes support all `method` values except `crossCollection`.
+For more information on these implementations and their tradeoffs, see the xref:join-query-parser.adoc#parameters[`method` parameter documentation].  Join domain changes support all `method` values except `crossCollection`.
 
 == Graph Traversal Domain Changes
 
 A `graph` domain change option works similarly to the `join` domain option, but can do traversal multiple hops `from` the existing domain `to` other documents.
 
-This works very similar to the <<other-parsers.adoc#graph-query-parser,Graph Query Parser>>, supporting all of its optional parameters, and has the same limitations when dealing with multi-shard collections.
+This works very similar to the xref:other-parsers.adoc#graph-query-parser[Graph Query Parser], supporting all of its optional parameters, and has the same limitations when dealing with multi-shard collections.
 
 Example:
 [source,json]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/json-query-dsl.adoc b/solr/solr-ref-guide/modules/query-guide/pages/json-query-dsl.adoc
index 357e446..9ae5267 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/json-query-dsl.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/json-query-dsl.adoc
@@ -21,9 +21,9 @@ Queries and filters provided in JSON requests can be specified using a rich, pow
 == Query DSL Structure
 The JSON Request API accepts query values in three different formats:
 
-* A valid <<standard-query-parser.adoc#,query string>> that uses the default `deftype` (`lucene`, in most cases), e.g., `title:solr`.
+* A valid xref:standard-query-parser.adoc[query string] that uses the default `deftype` (`lucene`, in most cases), e.g., `title:solr`.
 
-* A valid <<local-params.adoc#,local params query string>> that specifies its `deftype` explicitly, e.g., `{!dismax qf=title}solr`.
+* A valid xref:local-params.adoc[local params query string] that specifies its `deftype` explicitly, e.g., `{!dismax qf=title}solr`.
 
 * A valid JSON object with the name of the query parser and any relevant parameters, e.g., `{ "lucene": {"df":"title", "query":"solr"}}`.
 ** The top level "query" JSON block generally only has a single property representing the name of the query parser to use.
@@ -237,7 +237,7 @@ include::example$JsonRequestApiTest.java[tag=solrj-ipod-query-boosted-dsl-2]
 --
 
 === Nested Boolean Query Example
-Query nesting is commonly seen when combining multiple query clauses together using pseudo-boolean logic with the <<other-parsers.adoc#boolean-query-parser,BoolQParser>>.
+Query nesting is commonly seen when combining multiple query clauses together using pseudo-boolean logic with the xref:other-parsers.adoc#boolean-query-parser[BoolQParser].
 
 The example below shows how the `BoolQParser` can be used to create powerful nested queries.
 In this example, a user searches for results with `iPod` in the field `name` which are _not_ in the bottom half of the `popularity` rankings.
@@ -308,7 +308,7 @@ include::example$JsonRequestApiTest.java[tag=solrj-ipod-query-bool-condensed]
 ====
 --
 
-Example of referencing <<Additional Queries,additional queries>>, <<Tagging in JSON Query DSL,tagging>> and <<other-parsers.adoc#boolean-query-parser,exclusions>>:
+Example of referencing <<Additional Queries,additional queries>>, <<Tagging in JSON Query DSL,tagging>>, and xref:other-parsers.adoc#boolean-query-parser[exclusions]:
 
 [source,bash]
 ----
@@ -395,14 +395,14 @@ curl -X POST http://localhost:8983/solr/techproducts/query -d '
 ----
 
 Overall this example doesn't make much sense, but just demonstrates the syntax.
-This feature is useful in <<json-faceting-domain-changes.adoc#adding-domain-filters,filtering domain>> in JSON Facet API <<json-facet-api.adoc#changing-the-domain,domain changes>>.
+This feature is useful in xref:json-faceting-domain-changes.adoc#adding-domain-filters[filtering domain] in JSON Facet API xref:json-facet-api.adoc#changing-the-domain[domain changes].
 Note that these declarations add request parameters underneath, so using same names with other parameters might cause unexpected behavior.
 
 == Tagging in JSON Query DSL
 
 Query and filter clauses can also be individually "tagged".
 Tags serve as handles for query clauses, allowing them to be referenced from elsewhere in the request.
-This is most commonly used by the filter-exclusion functionality offered by both <<faceting.adoc#tagging-and-excluding-filters,traditional>> and <<json-faceting-domain-changes.adoc#filter-exclusions,JSON>> faceting.
+This is most commonly used by the filter-exclusion functionality offered by both xref:faceting.adoc#tagging-and-excluding-filters[traditional] and xref:json-faceting-domain-changes.adoc#filter-exclusions[JSON] faceting.
 
 Queries and filters are tagged by wrapping them in a surrounding JSON object.
 The name of the tag is specified as a JSON key, with the query string (or object) becoming the value associated with that key.
@@ -454,8 +454,8 @@ Let's go on item by item:
 
 * _Filter exclusion_ is usually necessary when multiple filter values can be applied to each field.
 This is also also known as drill-sideways facets.
-See also <<faceting.adoc#tagging-and-excluding-filters,the classic example>> of filter exclusion.
-* _Nested documents_, or child documents, are described in <<indexing-nested-documents.adoc#,Indexing Nested Documents>>.
+See also xref:faceting.adoc#tagging-and-excluding-filters[tagging and filter exclusion].
+* _Nested documents_, or child documents, are described in xref:indexing-guide:indexing-nested-documents.adoc[].
 In the example below, they are referred as SKUs since this is a frequent use case for this feature.
 * _Counting facets over children documents_ means that even though the result of the search are usually parent documents, the children documents and their fields are used for faceting.
 * _Facet counts roll up_ means that child documents contribute facet hits because parent documents they are linked to are counted.
@@ -505,4 +505,4 @@ The SKU field is defined as `sku_attr1`, and we've set `limit=-1` so we get all
 The `blockChildren` value references the all-parents query.
 Then we define a filter to restrict to SKU docs but exclude one tag retaining only `sku_attr2:bar`.
 <6> Counts the subfacet in parent documents.
-See also <<json-facet-api.adoc#uniqueblock-and-block-join-counts>>.
+See also xref:json-facet-api.adoc#uniqueblock-and-block-join-counts[uniqueBlock() and Block Join Counts].
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/json-request-api.adoc b/solr/solr-ref-guide/modules/query-guide/pages/json-request-api.adoc
index dd2723e..ca851a9 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/json-request-api.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/json-request-api.adoc
@@ -21,7 +21,7 @@
 
 Solr supports an alternate request API which accepts requests composed in part or entirely of JSON objects.
 This alternate API can be preferable in some situations, where its increased readability and flexibility make it easier to use than the entirely query-parameter driven alternative.
-There is also some functionality which can only be accessed through this JSON request API, such as much of the analytics capabilities of <<json-facet-api.adoc#facet-analytics-module,JSON Faceting>>
+There is also some functionality which can only be accessed through this JSON request API, such as much of the analytics capabilities of the xref:json-facet-api.adoc[].
 
 == Building JSON Requests
 The core of the JSON Request API is its ability to specify request parameters as JSON in the request body, as shown by the example below:
@@ -174,7 +174,7 @@ include::example$JsonRequestApiTest.java[tag=solrj-json-facet-all-query-params-e
 ====
 --
 
-See the <<json-facet-api.adoc#facet-analytics-module,JSON Facet API>> for more on faceting and analytics commands.
+See the xref:json-facet-api.adoc[] for more on faceting and analytics commands.
 
 == Supported Properties and Syntax
 
@@ -249,7 +249,7 @@ Parameters placed in a `params` block act as if they were added verbatim to the
 curl "http://localhost:8983/solr/techproducts/query?fl=name,price&q=memory&rows=1"
 ----
 
-Usage of `queries` key is described in <<json-query-dsl.adoc#additional-queries,Query DSL>>.
+Usage of the `queries` key is described in xref:json-query-dsl.adoc#additional-queries[Additional Queries].
 
 
 === Parameter Substitution / Macro Expansion
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/learning-to-rank.adoc b/solr/solr-ref-guide/modules/query-guide/pages/learning-to-rank.adoc
index cd7a74e..cac4a78 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/learning-to-rank.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/learning-to-rank.adoc
@@ -26,7 +26,7 @@ The only thing you need to do outside Solr is train your own ranking model.
 === Re-Ranking
 
 Re-Ranking allows you to run a simple query for matching documents and then re-rank the top N documents using the scores from a different, more complex query.
-This page describes the use of *LTR* complex queries, information on other rank queries included in the Solr distribution can be found on the <<query-re-ranking.adoc#,Query Re-Ranking>> page.
+This page describes the use of *LTR* complex queries, information on other rank queries included in the Solr distribution can be found in xref:query-re-ranking.adoc[].
 
 === Learning To Rank Models
 
@@ -91,7 +91,7 @@ The process of https://en.wikipedia.org/wiki/Feature_engineering[feature enginee
 
 ==== Feature Extraction
 
-The ltr contrib module includes a <<document-transformers.adoc#,[features>> transformer] to support the calculation and return of feature values for https://en.wikipedia.org/wiki/Feature_extraction[feature extraction] purposes including and especially when you do not yet have an actual reranking model.
+The ltr contrib module includes a xref:document-transformers.adoc[`\[features\]` transformer] to support the calculation and return of feature values for https://en.wikipedia.org/wiki/Feature_extraction[feature extraction] purposes including and especially when you do not yet have an actual reranking model.
 
 ==== Feature Selection and Model Training
 
@@ -647,7 +647,7 @@ Assuming that you consider to use a large model placed at `/path/to/models/myMod
 }
 ----
 
-First, add the directory to Solr's resource paths with a <<libs.adoc#lib-directives-in-solrconfig,`<lib/>` directive>>:
+First, add the directory to Solr's resource paths with a xref:configuration-guide:libs.adoc#lib-directives-in-solrconfig[`<lib/>` directive]:
 
 [source,xml]
 ----
@@ -674,11 +674,11 @@ NOTE: No `"features"` are configured in `myWrapperModel` because the features of
 
 CAUTION: `<lib dir="/path/to/models" regex=".*\.json" />` doesn't work as expected in this case, because `SolrResourceLoader` considers given resources as JAR if `<lib />` indicates files.
 
-As an alternative to the above-described `DefaultWrapperModel`, it is possible to <<zookeeper-ensemble.adoc#increasing-the-file-size-limit,increase ZooKeeper's file size limit>>.
+As an alternative to the above-described `DefaultWrapperModel`, it is possible to xref:deployment-guide:zookeeper-ensemble.adoc#increasing-the-file-size-limit[increase ZooKeeper's file size limit].
 
 === Applying Changes
 
-The feature store and the model store are both <<managed-resources.adoc#,Managed Resources>>.
+The feature store and the model store are both xref:configuration-guide:managed-resources.adoc[].
 Changes made to managed resources are not applied to the active Solr components until the Solr collection (or Solr core in single server mode) is reloaded.
 
 === LTR Examples
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/local-params.adoc b/solr/solr-ref-guide/modules/query-guide/pages/local-params.adoc
index 7bf7069..7f74b6a 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/local-params.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/local-params.adoc
@@ -58,7 +58,7 @@ is equivalent to:
 
 `q={!type=dismax qf=myfield}solr rocks`
 
-If no "type" is specified (either explicitly or implicitly) then the <<standard-query-parser.adoc#,lucene parser>> is used by default.
+If no "type" is specified (either explicitly or implicitly) then the xref:standard-query-parser.adoc[] is used by default.
 Thus:
 
 `fq={!df=summary}solr rocks`
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/logs.adoc b/solr/solr-ref-guide/modules/query-guide/pages/logs.adoc
index c58b760..7b94125 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/logs.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/logs.adoc
@@ -18,9 +18,9 @@
 
 This section of the user guide provides an introduction to Solr log analytics.
 
-NOTE: This is an appendix of the <<math-expressions.adoc#,Visual Guide to Streaming Expressions and Math Expressions>>.
+NOTE: This is an appendix of the xref:math-expressions.adoc[Visual Guide to Streaming Expressions and Math Expressions].
 All the functions described below are covered in detail in the guide.
-See the <<math-start.adoc#,Getting Started>> chapter to learn how to get started with visualizations and Apache Zeppelin.
+See the xref:math-start.adoc[] chapter to learn how to get started with visualizations and Apache Zeppelin.
 
 == Loading
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/machine-learning.adoc b/solr/solr-ref-guide/modules/query-guide/pages/machine-learning.adoc
index aed8a0a..5f602ef 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/machine-learning.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/machine-learning.adoc
@@ -294,7 +294,7 @@ r=knnRegress(obs, quality, 5, scale="true"),
 == knnSearch
 
 The `knnSearch` function returns the k-nearest neighbors for a document based on text similarity.
-Under the covers the `knnSearch` function uses Solr's <<other-parsers.adoc#more-like-this-query-parser,More Like This>> query parser plugin.
+Under the covers the `knnSearch` function uses Solr's xref:morelikethis.adoc#morelikethis-query-parser[More Like This query parser].
 This capability uses the search engine's query, term statistics, scoring, and ranking capability to perform a fast, nearest neighbor search for similar documents over large distributed indexes.
 
 The results of this search can be used directly or provide *candidates* for machine learning operations such as a secondary KNN vector search.
@@ -412,7 +412,7 @@ K-means clustering produces centroids or *prototype* vectors which can be used t
 In this example the key features of the centroids are extracted to represent the key phrases for clusters of TF-IDF term vectors.
 
 NOTE: The example below works with TF-IDF _term vectors_.
-The section <<term-vectors.adoc#,Text Analysis and Term Vectors>> offers a full explanation of this features.
+The section xref:term-vectors.adoc[] offers a full explanation of this feature.
 
 In the example the `search` function returns documents where the `review_t` field matches the phrase "star wars".
 The `select` function is run over the result set and applies the `analyze` function which uses the Lucene/Solr analyzer attached to the schema field `text_bigrams` to re-analyze the `review_t` field.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc b/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc
index 598fafb..512ed1a 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/math-expressions.adoc
@@ -1,27 +1,5 @@
 = Streaming Expressions and Math Expressions
-:page-children: visualization, \
-  math-start, \
-  loading, \
-  search-sample, \
-  transform, \
-  scalar-math, \
-  vector-math, \
-  variables, \
-  matrix-math, \
-  term-vectors, \
-  probability-distributions, \
-  statistics, \
-  regression, \
-  curve-fitting, \
-  time-series, \
-  numerical-analysis, \
-  dsp, \
-  simulations, \
-  machine-learning, \
-  graph, \
-  computational-geometry, \
-  logs
-:page-show-toc: false
+:toc!:
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -45,46 +23,27 @@ image::math-expressions/searchiris.png[]
 
 == Table of Contents
 
-*<<visualization.adoc#,Visualizations>>*: Gallery of streaming expression and math expression visualizations.
-
-*<<math-start.adoc#,Getting Started>>*: Getting started with streaming expressions, math expressions, and visualization.
-
-*<<loading.adoc#,Data Loading>>*: Visualizing, transforming and loading CSV files.
-
-*<<search-sample.adoc#,Searching, Sampling and Aggregation>>*: Searching, sampling, aggregation and visualization of result sets.
-
-*<<transform.adoc#,Transforming Data>>*: Transforming and filtering result sets.
-
-*<<scalar-math.adoc#,Scalar Math>>*: Math functions and visualization applied to numbers.
-
-*<<vector-math.adoc#,Vector Math>>*: Vector math, manipulation and visualization.
-
-*<<variables.adoc#, Variables and Vectorization>>*: Vectorizing result sets and assigning and visualizing variables.
-
-*<<matrix-math.adoc#,Matrix Math>>*: Matrix math, manipulation and visualization.
-
-*<<term-vectors.adoc#,Text Analysis and Term Vectors>>*: Text analysis and TF-IDF term vectors.
-
-*<<probability-distributions.adoc#,Probability>>*: Continuous and discrete probability distribution functions.
-
-*<<statistics.adoc#,Statistics>>*: Descriptive statistics, histograms, percentiles, correlation, inference tests and other stats functions.
-
-*<<regression.adoc#,Linear Regression>>*: Simple and multivariate linear regression.
-
-*<<curve-fitting.adoc#,Curve Fitting>>*: Polynomial, harmonic and Gaussian curve fitting.
-
-*<<time-series.adoc#,Time Series>>*: Time series aggregation, visualization, smoothing, differencing, anomaly detection and forecasting.
-
-*<<numerical-analysis.adoc#,Interpolation and Numerical Calculus>>*: Interpolation, derivatives and integrals.
-
-*<<dsp.adoc#,Signal Processing>>*: Convolution, cross-correlation, autocorrelation and fast Fourier transforms.
-
-*<<simulations.adoc#,Simulations>>*: Monte Carlo simulations and random walks.
-
-*<<machine-learning.adoc#,Machine Learning>>*: Distance, KNN, DBSCAN, K-means, fuzzy K-means and other ML functions.
-
-*<<graph.adoc#,Graph>>*: Bipartite graphs, in-degree centrality, graph recommenders, temporal graphs and event correlation.
-
-*<<computational-geometry.adoc#,Computational Geometry>>*: Convex Hulls and Enclosing Disks.
-
-*<<logs.adoc#,Appendix A>>*: Solr log analytics and visualization.
+[cols="1,1",frame=none,grid=none,stripes=none]
+|===
+| *xref:visualization.adoc[]*: Gallery of streaming expression and math expression visualizations.
+| *xref:math-start.adoc[]*: Getting started with streaming expressions, math expressions, and visualization.
+| *xref:loading.adoc[]*: Visualizing, transforming and loading CSV files.
+| *xref:search-sample.adoc[]*: Searching, sampling, aggregation and visualization of result sets.
+| *xref:transform.adoc[]*: Transforming and filtering result sets.
+| *xref:scalar-math.adoc[]*: Math functions and visualization applied to numbers.
+| *xref:vector-math.adoc[]*: Vector math, manipulation and visualization.
+| *xref:variables.adoc[]*: Vectorizing result sets and assigning and visualizing variables.
+| *xref:matrix-math.adoc[]*: Matrix math, manipulation and visualization.
+| *xref:term-vectors.adoc[]*: Text analysis and TF-IDF term vectors.
+| *xref:probability-distributions.adoc[]*: Continuous and discrete probability distribution functions.
+| *xref:statistics.adoc[]*: Descriptive statistics, histograms, percentiles, correlation, inference tests and other stats functions.
+| *xref:regression.adoc[]*: Simple and multivariate linear regression.
+| *xref:curve-fitting.adoc[]*: Polynomial, harmonic and Gaussian curve fitting.
+| *xref:time-series.adoc[]*: Time series aggregation, visualization, smoothing, differencing, anomaly detection and forecasting.
+| *xref:numerical-analysis.adoc[]*: Interpolation, derivatives and integrals.
+| *xref:dsp.adoc[]*: Convolution, cross-correlation, autocorrelation and fast Fourier transforms.
+| *xref:simulations.adoc[]*: Monte Carlo simulations and random walks.
+| *xref:machine-learning.adoc[]*: Distance, KNN, DBSCAN, K-means, fuzzy K-means and other ML functions.
+| *xref:graph.adoc[]*: Bipartite graphs, in-degree centrality, graph recommenders, temporal graphs and event correlation.
+| *xref:computational-geometry.adoc[]*: Convex Hulls and Enclosing Disks.
+| *xref:logs.adoc[]*: Solr log analytics and visualization.
+|===
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/math-start.adoc b/solr/solr-ref-guide/modules/query-guide/pages/math-start.adoc
index 1127811..8a3b6551 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/math-start.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/math-start.adoc
@@ -40,7 +40,7 @@ This handler compiles the expression, runs the expression logic and returns a JS
 
 === Admin UI Stream Panel
 
-The easiest way to run streaming expressions and math expressions is through the <<stream-screen.adoc#,Stream Screen>> in the Solr Admin UI.
+The easiest way to run streaming expressions and math expressions is through the xref:stream-screen.adoc[] in the Solr Admin UI.
 
 A sample `search` streaming expression is shown in the screenshot below:
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/matrix-math.adoc b/solr/solr-ref-guide/modules/query-guide/pages/matrix-math.adoc
index 89dfbc6..ad9b0fe 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/matrix-math.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/matrix-math.adoc
@@ -119,7 +119,7 @@ responds with:
 == Visualization
 
 The `zplot` function can plot matrices as a heat map using the `heat` named parameter.
-Heat maps are powerful visualization tools for displaying <<statistics.adoc#correlation-matrices,*correlation*>> and <<machine-learning.adoc#distance-and-distance-matrices,*distance*>> matrices described later in the guide.
+Heat maps are powerful visualization tools for displaying xref:statistics.adoc#correlation-matrices[*correlation*] and xref:machine-learning.adoc#distance-and-distance-matrices[*distance*] matrices described later in the guide.
 The example below shows a 2x2 matrix visualized using the heat map
 visualization in Apache Zeppelin.
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/morelikethis.adoc b/solr/solr-ref-guide/modules/query-guide/pages/morelikethis.adoc
index 1ec05c9..273ba20 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/morelikethis.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/morelikethis.adoc
@@ -39,7 +39,7 @@ This operates in much the same way as the request handler but since it is a quer
 `MoreLikeThis` constructs a Lucene query based on terms in a document.
 It does this by pulling terms from the list of fields provided with the request.
 
-For best results, the fields should have stored term vectors (`termVectors=true`), which can be <<fields.adoc#,configured in the schema>>.
+For best results, the fields should have stored term vectors (`termVectors=true`), which can be xref:indexing-guide:fields.adoc[configured in the schema].
 If term vectors are not stored, MoreLikeThis can generate terms from stored fields.
 The field used for the `uniqueKey` must also be stored in order for MoreLikeThis to work properly.
 
@@ -157,7 +157,7 @@ Possible values are `true` or `false`.
 |Optional |Default: none
 |===
 +
-Query fields and their boosts using the same format used by the <<dismax-query-parser.adoc#,DisMax Query Parser>>.
+Query fields and their boosts using the same format used by the xref:dismax-query-parser.adoc[].
 These fields must also be specified in `mlt.fl`.
 
 `mlt.interestingTerms`::
@@ -177,7 +177,7 @@ Unless `mlt.boost=true`, all terms will have `boost=1.0`.
 
 +
 To use this parameter with the <<MoreLikeThis Search Component,search component>>, the query cannot be distributed.
-In order to get interesting terms, the query must be sent to a single shard and limited to that shard only (with the <<solrcloud-distributed-requests.adoc#limiting-which-shards-are-queried,`shards`>> parameter).
+In order to get interesting terms, the query must be sent to a single shard and limited to that shard only (with the xref:deployment-guide:solrcloud-distributed-requests.adoc#limiting-which-shards-are-queried[`shards` parameter]).
 Multi-shard support is, however, available with the MoreLikeThis request handler.
 
 === MoreLikeThis Request Handler
@@ -221,7 +221,7 @@ curl -X POST -H 'Content-type:application/json' -d {
 Both of the above examples set the `mlt.fl` parameter to "body" for the request handler.
 This means that all requests to the handler will use that value for the parameter unless specifically overridden in an individual request.
 
-For more about request handler configuration in general, see the section <<requesthandlers-searchcomponents.adoc#default-components,Request Handlers and SearchComponents>>.
+For more about request handler configuration in general, see the section xref:configuration-guide:requesthandlers-searchcomponents.adoc#default-components[Default Components].
 
 ==== Request Handler Parameters
 
@@ -319,7 +319,7 @@ If we had not requested `mlt.match.include=true`, the response would not have in
 
 An external document (one not in the index) can be passed to the MoreLikeThis request handler to be used for recommended documents.
 
-This is accomplished with the use of <<content-streams.adoc#,Content Streams>>.
+This is accomplished with the use of xref:indexing-guide:content-streams.adoc[].
 The body of a document can be passed directly to the request handler with the `stream.body` parameter.
 Alternatively, if remote streams are enabled, a URL or file could be passed.
 
@@ -339,7 +339,7 @@ It's important to note this could incur a cost to search performance so should o
 
 ==== Search Component Configuration
 
-The MoreLikeThis search component is a default search component that works with all search handlers (see also <<requesthandlers-searchcomponents.adoc#default-components,Default Components>>).
+The MoreLikeThis search component is a default search component that works with all search handlers (see also xref:configuration-guide:requesthandlers-searchcomponents.adoc#default-components[Default Components]).
 
 Since it is configured already, it doesn't need any additional configuration unless you'd like to set parameters for a particular collection that override the MoreLikeThis defaults.
 To do this, you could configure it like this:
@@ -356,8 +356,8 @@ The above example would always enable MoreLikeThis for all queries and will alwa
 This is probably not something you really want!
 But the example serves to show how you might define whichever parameters you would like to be default for MoreLikeThis.
 
-If you gave the search component a name other than "mlt" as in the above example, you would need to explicitly add it to a request handler as described in the section <<requesthandlers-searchcomponents.adoc#referencing-search-components,Referencing Search Components>>.
-Because the above example uses the same name as the default, the parameters defined there override Solr's default.
+If you gave the search component a name other than "mlt" as in the above example, you would need to explicitly add it to a request handler as described in the section xref:configuration-guide:requesthandlers-searchcomponents.adoc#referencing-search-components[Referencing Search Components].
+Because the above example uses the same name as the default, the parameters defined there override Solr's default.
 
 ==== Search Component Parameters
 
@@ -592,7 +592,7 @@ It can be either `true` or `false`.
 
 === Query Parser Query and Response
 
-The structure of a MoreLikeThis query parser request is like a query using <<local-params.adoc#,local params>>, as in:
+The structure of a MoreLikeThis query parser request is like a query using xref:local-params.adoc[], as in:
 
 [source,bash]
 ----
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/other-parsers.adoc b/solr/solr-ref-guide/modules/query-guide/pages/other-parsers.adoc
index 5c4ecaf..93ca8e9 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/other-parsers.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/other-parsers.adoc
@@ -20,13 +20,13 @@ In addition to the main query parsers, there are several other query parsers tha
 
 This section details the other parsers, and gives examples for how they might be used.
 
-Many of these parsers are expressed the same way as <<local-params.adoc#,Local Params>>.
+Many of these parsers are expressed the same way as xref:local-params.adoc[].
 
 == Block Join Query Parsers
 
 The Block Join query parsers are used with nested documents to query for parents and/or children.
 
-These parsers are covered in detail in the section <<block-join-query-parser.adoc#,Block Join Query Parser>>.
+These parsers are covered in detail in the section xref:block-join-query-parser.adoc[].
 
 == Boolean Query Parser
 
@@ -106,7 +106,7 @@ q={!bool must=$ref}&ref=foo&ref=bar
 ----
 
 Referred queries might be excluded via tags.
-Overall the idea is similar to <<faceting.adoc#tagging-and-excluding-filters, excluding fq in facets>>.
+Overall the idea is similar to xref:faceting.adoc#tagging-and-excluding-filters[excluding fq in facets].
 
 [source,text]
 ----
@@ -124,7 +124,7 @@ q={!bool must=foo}
 
 `BoostQParser` extends the `QParserPlugin` and creates a boosted query from the input value.
 The main value is any query to be "wrapped" and "boosted" -- only documents which match that query will match the final query produced by this parser.
-Parameter `b` is a <<function-queries.adoc#available-functions,function>> to be evaluated against each document that matches the original query, and the result of the function will be multiplied into into the final score for that document.
+Parameter `b` is a xref:function-queries.adoc#available-functions[function] to be evaluated against each document that matches the original query, and the result of the function will be multiplied into the final score for that document.
 
 === Boost Query Parser Examples
 
@@ -143,9 +143,9 @@ Creates a query `name:foo` which has its scores multiplied by the _inverse_ of
 q={!boost b=div(1,add(1,price))}name:foo
 ----
 
-The `<<function-queries.adoc#query-function,query(...)>>` function is particularly useful for situations where you want to multiply (or divide) the score for each document matching your main query by the score that document would have from another query.
+The xref:function-queries.adoc#query-function[`query(...)`] function is particularly useful for situations where you want to multiply (or divide) the score for each document matching your main query by the score that document would have from another query.
 
-This example uses <<local-params.adoc#parameter-dereferencing,local param variables>> to create a query for `name:foo` which is boosted by the scores from the independently specified query `category:electronics`:
+This example uses xref:local-params.adoc#parameter-dereferencing[local param variables] to create a query for `name:foo` which is boosted by the scores from the independently specified query `category:electronics`:
 
 [source,text]
 ----
@@ -161,7 +161,7 @@ The `CollapsingQParser` is really a _post filter_ that provides more performant
 This parser collapses the result set to a single document per group before it forwards the result set to the rest of the search components.
 So all downstream components (faceting, highlighting, etc.) will work with the collapsed result set.
 
-Details about using the `CollapsingQParser` can be found in the section <<collapse-and-expand-results.adoc#,Collapse and Expand Results>>.
+Details about using the `CollapsingQParser` can be found in the section xref:collapse-and-expand-results.adoc[].
 
 == Complex Phrase Query Parser
 
@@ -216,7 +216,7 @@ It may be prudent to restrict wildcards to at least two or preferably three lett
 Allowing very short prefixes may result in too many low-quality documents being returned.
 
 Notice that it also supports leading wildcards "*a" as well with consequent performance implications.
-Applying <<filters.adoc#reversed-wildcard-filter,ReversedWildcardFilterFactory>> in index-time analysis is usually a good idea.
+Applying xref:indexing-guide:filters.adoc#reversed-wildcard-filter[ReversedWildcardFilterFactory] in index-time analysis is usually a good idea.
 
 ==== MaxBooleanClauses with Complex Phrase Parser
 
@@ -227,7 +227,7 @@ You may need to increase MaxBooleanClauses in `solrconfig.xml` as a result of th
 <maxBooleanClauses>4096</maxBooleanClauses>
 ----
 
-This property is described in more detail in the section <<caches-warming.adoc#query-sizing-and-warming,Query Sizing and Warming>>.
+This property is described in more detail in the section xref:configuration-guide:caches-warming.adoc#query-sizing-and-warming[Query Sizing and Warming].
 
 ==== Stopwords with Complex Phrase Parser
 
@@ -292,7 +292,7 @@ The `param` local parameter uses "`$`" syntax to refer to a few queries, where `
 == Function Query Parser
 
 The `FunctionQParser` extends the `QParserPlugin` and creates a function query from the input value.
-This is only one way to use function queries in Solr; for another, more integrated, approach, see the section on <<function-queries.adoc#,Function Queries>>.
+This is only one way to use function queries in Solr; for another, more integrated, approach, see the section on xref:function-queries.adoc[].
 
 Example:
 
@@ -572,7 +572,7 @@ The hash range query parser has a per-segment cache for each field that this que
 When specifying a min/max hash range and a field name with the hash range query parser, only documents that contain a field value that hashes into that range will be returned.
 If you want to query for a very large result set, you can query for various hash ranges to return a fraction of the documents with each range request.
 
-In the <<join-query-parser.adoc#cross-collection-join,cross collection join>> case, the hash range query parser is used to ensure that each shard only gets the set of join keys that would end up on that shard.
+In the xref:join-query-parser.adoc#cross-collection-join[cross collection join] case, the hash range query parser is used to ensure that each shard only gets the set of join keys that would end up on that shard.
 
 This query parser uses the MurmurHash3_x86_32.
 This is the same as the default hashing for the default composite ID router in Solr.
@@ -634,7 +634,7 @@ Note the name of the cache should be the field name prefixed by "`hash_`".
 
 The Join Query Parser allows users to run queries that normalize relationships between documents, similar to SQL-style joins.
 
-Details of this query parser are in the section <<join-query-parser.adoc#,Join Query Parser>>.
+Details of this query parser are in the section xref:join-query-parser.adoc[].
 
 == Learning To Rank Query Parser
 
@@ -647,7 +647,7 @@ Example:
 {!ltr model=myModel reRankDocs=100}
 ----
 
-Details about using the `LTRQParserPlugin` can be found in the <<learning-to-rank.adoc#,Learning To Rank>> section.
+Details about using the `LTRQParserPlugin` can be found in the xref:learning-to-rank.adoc[] section.
 
 == Max Score Query Parser
 
@@ -888,7 +888,7 @@ The implementation is derived from an unbiased method proposed in later work^[3]
 
 The `MLTQParser` enables retrieving documents that are similar to a given document.
 It uses Lucene's existing `MoreLikeThis` logic and also works in SolrCloud mode.
-Information about how to use this query parser is with the documentation about MoreLikeThis, in the section <<morelikethis.adoc#morelikethis-query-parser,MoreLikeThis Query Parser>>.
+Information about how to use this query parser is with the documentation about MoreLikeThis, in the section xref:morelikethis.adoc#morelikethis-query-parser[MoreLikeThis Query Parser].
 
 == Nested Query Parser
 
@@ -1095,7 +1095,7 @@ http://localhost:8983/solr/techproducts?q=memory _query_:{!rank f='pagerank', fu
 
 The `ReRankQParserPlugin` is a special purpose parser for Re-Ranking the top results of a simple query using a more complex ranking query.
 
-Details about using the `ReRankQParserPlugin` can be found in the <<query-re-ranking.adoc#,Query Re-Ranking>> section.
+Details about using the `ReRankQParserPlugin` can be found in the xref:query-re-ranking.adoc[] section.
 
 == Simple Query Parser
 
@@ -1178,7 +1178,7 @@ However, this can lead to odd results in some cases.
 There are two spatial QParsers in Solr: `geofilt` and `bbox`.
 But there are other ways to query spatially: using the `frange` parser with a distance function, using the standard (lucene) query parser with the range syntax to pick the corners of a rectangle, or with RPT and BBoxField you can use the standard query parser but use a special syntax within quotes that allows you to pick the spatial predicate.
 
-All these options are documented further in the section <<spatial-search.adoc#,Spatial Search>>.
+All these options are documented further in the section xref:spatial-search.adoc[].
 
 == Surround Query Parser
 
@@ -1284,7 +1284,7 @@ If no analysis or transformation is desired for any type of field, see the <<Raw
 
 `TermsQParser` functions similarly to the <<Term Query Parser,Term Query Parser>> but takes in multiple values separated by commas and returns documents matching any of the specified values.
 
-This can be useful for generating filter queries from the external human readable terms returned by the faceting or terms components, and may be more efficient in some cases than using the <<standard-query-parser.adoc#,Standard Query Parser>> to generate a boolean query since the default implementation `method` avoids scoring.
+This can be useful for generating filter queries from the external human readable terms returned by the faceting or terms components, and may be more efficient in some cases than using the xref:standard-query-parser.adoc[] to generate a boolean query since the default implementation `method` avoids scoring.
 
 This query parser takes the following parameters:
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/pagination-of-results.adoc b/solr/solr-ref-guide/modules/query-guide/pages/pagination-of-results.adoc
index 5d35ddc..da9f34a 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/pagination-of-results.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/pagination-of-results.adoc
@@ -22,7 +22,7 @@ In many applications the UI for these sorted results are displayed to the user i
 
 == Basic Pagination
 
-In Solr, this basic paginated searching is supported using the `start` and `rows` parameters, and performance of this common behaviour can be tuned by utilizing the <<caches-warming.adoc#query-result-cache,`queryResultCache`>> and adjusting the <<caches-warming.adoc#queryresultwindowsize-element,`queryResultWindowSize`>> configuration options based on your expected page sizes.
+In Solr, this basic paginated searching is supported using the `start` and `rows` parameters, and performance of this common behaviour can be tuned by utilizing the xref:configuration-guide:caches-warming.adoc#query-result-cache[`queryResultCache`] and adjusting the xref:configuration-guide:caches-warming.adoc#queryresultwindowsize-element[`queryResultWindowSize`] configuration options based on your expected page sizes.
 
 === Basic Pagination Examples
 
@@ -105,7 +105,7 @@ There are a few important constraints to be aware of when using `cursorMark` par
 
 . `cursorMark` and `start` are mutually exclusive parameters.
 * Your requests must either not include a `start` parameter, or it must be specified with a value of "```0```".
-. When using the <<common-query-parameters.adoc#timeallowed-parameter,`timeAllowed`>> request parameter, partial results may be returned.
+. When using the xref:common-query-parameters.adoc#timeallowed-parameter[`timeAllowed`] request parameter, partial results may be returned.
 If time expires before the search is complete, as indicated when the `responseHeader` includes `"partialResults": true`, some matching documents may have been skipped.
 Additionally, if `cursorMark` matches `nextCursorMark`, you cannot be sure that there are no more results.
 +
@@ -114,10 +114,10 @@ When the `responseHeader` no longer includes `"partialResults": true`, and `curs
 . `sort` clauses must include the uniqueKey field (either `asc` or `desc`).
 +
 If `id` is your uniqueKey field, then sort parameters like `id asc` and `name asc, id desc` would both work fine, but `name asc` by itself would not
-. Sorts including <<date-formatting-math.adoc#,Date Math>> based functions that involve calculations relative to `NOW` will cause confusing results, since every document will get a new sort value on every subsequent request.
+. Sorts including xref:indexing-guide:date-formatting-math.adoc[Date Math-based] functions that involve calculations relative to `NOW` will cause confusing results, since every document will get a new sort value on every subsequent request.
 This can easily result in cursors that never end, and constantly return the same documents over and over – even if the documents are never updated.
 +
-In this situation, choose & re-use a fixed value for the <<date-formatting-math.adoc#now,`NOW` request parameter>> in all of your cursor requests.
+In this situation, choose & re-use a fixed value for the xref:indexing-guide:date-formatting-math.adoc#now[`NOW` request parameter] in all of your cursor requests.
 
 Cursor mark values are computed based on the sort values of each document in the result, which means multiple documents with identical sort values will produce identical Cursor mark values if one of them is the last document on a page of results.
 In that situation, the subsequent request using that `cursorMark` would not know which of the documents with the identical mark values should be skipped.
@@ -288,4 +288,4 @@ while (true) {
 }
 ----
 
-TIP: For certain specialized cases, the <<exporting-result-sets.adoc#,/export handler>> may be an option.
+TIP: For certain specialized cases, the xref:exporting-result-sets.adoc[/export handler] may be an option.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/query-elevation-component.adoc b/solr/solr-ref-guide/modules/query-guide/pages/query-elevation-component.adoc
index fd657f7..25085e0 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/query-elevation-component.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/query-elevation-component.adoc
@@ -20,7 +20,7 @@ The Query Elevation Component lets you configure the top results for a given que
 
 This is sometimes called "sponsored search", "editorial boosting", or "best bets." This component matches the user query text to a configured map of top results.
 The text can be any string or non-string IDs, as long as it's indexed.
-Although this component will work with any QueryParser, it makes the most sense to use with <<dismax-query-parser.adoc#,DisMax>> or <<edismax-query-parser.adoc#,eDisMax>>.
+Although this component will work with any QueryParser, it makes the most sense to use with the xref:dismax-query-parser.adoc[] or the xref:edismax-query-parser.adoc[].
 
 The Query Elevation Component also supports distributed searching.
 
@@ -98,7 +98,7 @@ s|Required |Default: none
 |===
 +
 Path to the file that defines the elevation rules.
-This file must exist in the <<config-sets.adoc#,configset>>.
+This file must exist in the xref:configuration-guide:config-sets.adoc[configset].
 Unlike most configuration, this component will re-read its configuration if the file changed following a commit.
 However, that doesn't work in SolrCloud, and there has to be an actual index change for a commit to have an effect for it to be used as a way to pick up changes.
 In all cases, you can reload affected cores/collections to use any new configuration in a configset.
@@ -205,7 +205,7 @@ You can force set `useConfiguredElevatedOrder` during runtime by supplying it as
 
 === Document Transformers and the markExcludes Parameter
 
-The `[elevated]` <<document-transformers.adoc#,Document Transformer>> can be used to annotate each document with information about whether or not it was elevated:
+The `[elevated]` xref:document-transformers.adoc[Document Transformer] can be used to annotate each document with information about whether or not it was elevated:
 
 [source,text]
 http://localhost:8983/solr/techproducts/elevate?q=ipod&df=text&fl=id,[elevated]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/query-re-ranking.adoc b/solr/solr-ref-guide/modules/query-guide/pages/query-re-ranking.adoc
index 7c03c96..82d971f 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/query-re-ranking.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/query-re-ranking.adoc
@@ -48,7 +48,7 @@ The `rerank` parser wraps a query specified by an local parameter, along with ad
 s|Required |Default: none
 |===
 +
-The query string for your complex ranking query - in most cases <<local-params.adoc#,a variable>> will be used to refer to another request parameter.
+The query string for your complex ranking query - in most cases xref:local-params.adoc[a variable] will be used to refer to another request parameter.
 
 `reRankDocs`::
 +
@@ -81,11 +81,11 @@ If a document matches the original query, but does not match the re-ranking quer
 
 === LTR Query Parser
 
-The `ltr` stands for Learning To Rank, please see <<learning-to-rank.adoc#,Learning To Rank>> for more detailed information.
+The `ltr` stands for Learning To Rank, please see xref:learning-to-rank.adoc[] for more detailed information.
 
 == Combining Ranking Queries with Other Solr Features
 
 The `rq` parameter and the re-ranking feature in general works well with other Solr features.
-For example, it can be used in conjunction with the <<collapse-and-expand-results.adoc#,collapse parser>> to re-rank the group heads after they've been collapsed.
-It also preserves the order of documents elevated by the <<query-elevation-component.adoc#,elevation component>>.
-And it even has its own custom explain so you can see how the re-ranking scores were derived when looking at <<common-query-parameters.adoc#debug-parameter,debug information>>.
+For example, it can be used in conjunction with xref:collapse-and-expand-results.adoc[] to re-rank the group heads after they've been collapsed.
+It also preserves the order of documents elevated by the xref:query-elevation-component.adoc[].
+And it even has its own custom explain so you can see how the re-ranking scores were derived when looking at xref:common-query-parameters.adoc#debug-parameter[debug information].
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/query-screen.adoc b/solr/solr-ref-guide/modules/query-guide/pages/query-screen.adoc
index cb60a81..f07b8fb 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/query-screen.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/query-screen.adoc
@@ -48,23 +48,23 @@ If a query handler is not specified, Solr processes the response with the standa
 
 q::
 The query event.
-See <<query-guide.adoc#,Query Guide>> for an explanation of this parameter.
+See xref:standard-query-parser.adoc[] for an explanation of this parameter.
 
 fq::
 The filter queries.
-See <<common-query-parameters.adoc#,Common Query Parameters>> for more information on this parameter.
+See xref:common-query-parameters.adoc[] for more information on this parameter.
 
 sort::
 Sorts the response to a query in either ascending or descending order based on the response's score or another specified characteristic.
 
 start, rows::
 `start` is the offset into the query result starting at which documents should be returned.
-The default value is 0, meaning that the query should return results starting with the first document that matches.
-This field accepts the same syntax as the start query parameter, which is described in <<query-guide.adoc#,Query Guide>>. `rows` is the number of rows to return.
+The default value is `0`, meaning that the query should return results starting with the first document that matches.
+This field accepts the same syntax as the `start` query parameter, which is described in xref:common-query-parameters.adoc[]. `rows` is the number of rows to return.
 
 fl::
 Defines the fields to return for each document.
-You can explicitly list the stored fields, <<function-queries.adoc#,functions>>, and <<document-transformers.adoc#,doc transformers>> you want to have returned by separating them with either a comma or a space.
+You can explicitly list the stored fields, xref:function-queries.adoc[functions], and xref:document-transformers.adoc[doc transformers] you want to have returned by separating them with either a comma or a space.
 
 wt::
 Specifies the Response Writer to be used to format the query response.
@@ -79,23 +79,23 @@ This debugging information is intended to be intelligible to the administrator o
 
 dismax::
 Click this button to enable the DisMax query parser.
-See <<dismax-query-parser.adoc#,The DisMax Query Parser>> for further information.
+See xref:dismax-query-parser.adoc[] for further information.
 
 edismax::
 Click this button to enable the Extended query parser.
-See <<edismax-query-parser.adoc#,The Extended DisMax Query Parser>> for further information.
+See xref:edismax-query-parser.adoc[] for further information.
 
 hl:: Click this button to enable highlighting in the query response.
-See <<highlighting.adoc#,Highlighting>> for more information.
+See xref:highlighting.adoc[] for more information.
 
 facet::
 Enables faceting, the arrangement of search results into categories based on indexed terms.
-See <<faceting.adoc#,Faceting>> for more information.
+See xref:faceting.adoc[] for more information.
 
 spatial::
 Click to enable using location data for use in spatial or geospatial searches.
-See <<spatial-search.adoc#,Spatial Search>> for more information.
+See xref:spatial-search.adoc[] for more information.
 
 spellcheck::
 Click this button to enable the Spellchecker, which provides inline query suggestions based on other, similar, terms.
-See <<spell-checking.adoc#,Spell Checking>> for more information.
+See xref:spell-checking.adoc[] for more information.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/query-syntax-and-parsers.adoc b/solr/solr-ref-guide/modules/query-guide/pages/query-syntax-and-parsers.adoc
index 16a7196..6390e94 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/query-syntax-and-parsers.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/query-syntax-and-parsers.adoc
@@ -38,7 +38,7 @@ Solr supports several query parsers, offering search application designers great
 
 This section explains how to specify a query parser and describes the syntax and features supported by several parsers available in Solr.
 
-There are some query parameters common to all Solr parsers; these are discussed in the section <<common-query-parameters.adoc#common-query-parameters,Common Query Parameters>>.
+There are some query parameters common to all Solr parsers; these are discussed in the section xref:common-query-parameters.adoc[].
 
 Query parsers are also called `QParserPlugins`.
 They are all subclasses of {solr-javadocs}/core/org/apache/solr/search/QParserPlugin.html[QParserPlugin].
@@ -49,21 +49,21 @@ If you have custom parsing needs, you may want to extend that class to create yo
 // tag::parser-sections[]
 [cols="1,1",frame=none,grid=none,stripes=none]
 |===
-| <<common-query-parameters.adoc#,Common Query Parameters>>: Query parameters that can be used with all query parsers.
-| <<standard-query-parser.adoc#,Standard Query Parser>>: The standard Lucene query parser.
-| <<dismax-query-parser.adoc#,DisMax Query Parser>>: The DisMax query parser.
-| <<edismax-query-parser.adoc#,Extended DisMax Query Parser>>: The Extended DisMax (eDisMax) Query Parser.
-| <<function-queries.adoc#,Function Queries>>: Parameters for generating relevancy scores using values from one or more numeric fields.
-| <<local-params.adoc#,Local Params>>: How to add local arguments to queries.
-| <<json-request-api.adoc#,JSON Request API>>: Solr's JSON Request API.
-| <<searching-nested-documents.adoc#,Searching Nested Documents>>: Constructing nested and hierarchical queries.
-| <<block-join-query-parser.adoc#,Block Join Query Parser>>: Query parser dedicated to searching nested documents.
-| <<join-query-parser.adoc#,Join Query Parser>>: Query parser to facilitate joins.
-| <<spatial-search.adoc#,Spatial Search>>: Solr's spatial search capabilities.
-| <<other-parsers.adoc#,Other Parsers>>: More parsers designed for use in specific situations.
-| <<sql-query.adoc#,SQL Query>>: SQL language support for Solr.
-| <<query-screen.adoc#,Query Screen>>: Form-based query builder.
-| <<sql-screen.adoc#,SQL Screen>>: SQL query runner with tabular results.
+| xref:common-query-parameters.adoc[]: Query parameters that can be used with all query parsers.
+| xref:standard-query-parser.adoc[]: The standard Lucene query parser.
+| xref:dismax-query-parser.adoc[]: The DisMax query parser.
+| xref:edismax-query-parser.adoc[]: The Extended DisMax (eDisMax) Query Parser.
+| xref:function-queries.adoc[]: Parameters for generating relevancy scores using values from one or more numeric fields.
+| xref:local-params.adoc[]: How to add local arguments to queries.
+| xref:json-request-api.adoc[]: Solr's JSON Request API.
+| xref:searching-nested-documents.adoc[]: Constructing nested and hierarchical queries.
+| xref:block-join-query-parser.adoc[]: Query parser dedicated to searching nested documents.
+| xref:join-query-parser.adoc[]: Query parser to facilitate joins.
+| xref:spatial-search.adoc[]: Solr's spatial search capabilities.
+| xref:other-parsers.adoc[]: More parsers designed for use in specific situations.
+| xref:sql-query.adoc[]: SQL language support for Solr.
+| xref:query-screen.adoc[]: Form-based query builder.
+| xref:sql-screen.adoc[]: SQL query runner with tabular results.
 |
 |===
 // end::parser-sections[]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/response-writers.adoc b/solr/solr-ref-guide/modules/query-guide/pages/response-writers.adoc
index de20edd..db57238 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/response-writers.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/response-writers.adoc
@@ -193,7 +193,7 @@ The default behavior is not to indent.
 The XSLT Response Writer applies an XML stylesheet to output.
 It can be used for tasks such as formatting results for an RSS feed.
 
-This response writer is part of the <<script-update-processor.adoc#,scripting contrib>>.
+This response writer is part of the xref:configuration-guide:script-update-processor.adoc[scripting contrib].
 Since it is a contrib, it requires configuration before it can be used.
 
 The XSLT Response Writer accepts one parameter:
@@ -212,7 +212,7 @@ The Content-Type of the response is set according to the `<xsl:output>` statemen
 
 === XSLT Configuration
 
-The example below, from the `sample_techproducts_configs` <<config-sets.adoc#,configset>> in the Solr distribution, shows how the XSLT Response Writer is configured.
+The example below, from the `sample_techproducts_configs` xref:configuration-guide:config-sets.adoc[configset] in the Solr distribution, shows how the XSLT Response Writer is configured.
 
 [source,xml]
 ----
@@ -277,7 +277,7 @@ Lastly, the `luke.xsl` transformation demonstrates that you can apply very sophi
 
 This is a custom binary format used by Solr for inter-node communication as well as client-server communication.
 SolrJ uses this as the default for indexing as well as querying.
-See <<client-apis.adoc#,Client APIs>> for more details.
+See xref:deployment-guide:client-apis.adoc[] for more details.
 
 == GeoJSON Response Writer
 
@@ -351,7 +351,7 @@ rsp['response']['docs'].each { |doc| puts 'name field = ' + doc['name'\] }
 The CSV response writer returns a list of documents in comma-separated values (CSV) format.
 Other information that would normally be included in a response, such as facet information, is excluded.
 
-The CSV response writer supports multi-valued fields, as well as <<document-transformers.adoc#,pseudo-fields>>, and the output of this CSV format is compatible with Solr's <<indexing-with-update-handlers.adoc#csv-formatted-index-updates,CSV update format>>.
+The CSV response writer supports multi-valued fields, as well as xref:document-transformers.adoc[pseudo-fields], and the output of this CSV format is compatible with Solr's xref:indexing-guide:indexing-with-update-handlers.adoc#csv-formatted-index-updates[CSV update format].
 
 === CSV Parameters
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/result-clustering.adoc b/solr/solr-ref-guide/modules/query-guide/pages/result-clustering.adoc
index a50c09f..367628a 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/result-clustering.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/result-clustering.adoc
@@ -34,7 +34,7 @@ image::result-clustering/carrot2.png[image,width=900]
 
 The query issued to the system was _Apache Solr_.
 It seems clear that faceting could not yield a similar set of groups, although the goals of both techniques are similar -- to let the user explore the set of search results and either rephrase the query or narrow the focus to a subset of current documents.
-Clustering is also similar to <<result-grouping.adoc#,Result Grouping>> in that it can help to look deeper into search results, beyond the top few hits.
+Clustering is also similar to xref:result-grouping.adoc[] in that it can help to look deeper into search results, beyond the top few hits.
 
 == Configuration Quick Starter
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/result-grouping.adoc b/solr/solr-ref-guide/modules/query-guide/pages/result-grouping.adoc
index f62a110..17069ac 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/result-grouping.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/result-grouping.adoc
@@ -24,12 +24,12 @@ In this case, the query term "DVD" appeared in all three categories, so Solr gro
 .Prefer Collapse & Expand instead
 [NOTE]
 ====
-Solr's <<collapse-and-expand-results.adoc#,Collapse and Expand>> feature is newer and mostly overlaps with Result Grouping.
+Solr's xref:collapse-and-expand-results.adoc[] feature is newer and mostly overlaps with Result Grouping.
 There are features unique to both, and they have different performance characteristics.
 That said, in most cases Collapse and Expand is preferable to Result Grouping.
 ====
 
-Result Grouping is separate from <<faceting.adoc#,Faceting>>.
+Result Grouping is separate from xref:faceting.adoc[].
 Though it is conceptually similar, faceting returns all relevant results and allows the user to refine the results based on the facet category.
 For example, if you search for "shoes" on a footwear retailer's e-commerce site, Solr would return all results for that query term, along with selectable facets such as "size," "color," "brand," and so on.
 
@@ -383,8 +383,8 @@ This is because one result for "memory" did not have a price assigned to it.
 
 == Distributed Result Grouping Caveats
 
-Grouping is supported for <<cluster-types.adoc#solrcloud-mode,distributed searches>>, with some caveats:
+Grouping is supported for xref:deployment-guide:cluster-types.adoc#solrcloud-mode[distributed searches], with some caveats:
 
 * Currently `group.func` is not supported in any distributed searches
 * `group.ngroups` and `group.facet` require that all documents in each group must be co-located on the same shard in order for accurate counts to be returned.
-<<solrcloud-shards-indexing.adoc#,Document routing via composite keys>> can be a useful solution in many situations.
+xref:deployment-guide:solrcloud-shards-indexing.adoc[Document routing via composite keys] can be a useful solution in many situations.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/search-sample.adoc b/solr/solr-ref-guide/modules/query-guide/pages/search-sample.adoc
index b636633..9dbdade 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/search-sample.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/search-sample.adoc
@@ -66,7 +66,7 @@ The visualization examples below use small random samples, but Solr's random sam
 These larger samples can be used to build reliable statistical models that describe large data sets (billions of documents) with sub-second performance.
 
 The examples below demonstrate univariate and bivariate scatter plots of random samples.
-Statistical modeling with random samples is covered in the <<statistics.adoc#,Statistics>>, <<probability-distributions.adoc#,Probability>>, <<regression.adoc#,Linear Regression>>, <<curve-fitting.adoc#,Curve Fitting>>, and <<machine-learning.adoc#,Machine Learning>> sections.
+Statistical modeling with random samples is covered in the xref:statistics.adoc[], xref:probability-distributions.adoc[], xref:regression.adoc[], xref:curve-fitting.adoc[], and xref:machine-learning.adoc[] sections.
 
 === Univariate Scatter Plots
 
@@ -134,7 +134,7 @@ image::math-expressions/stats.png[]
 === facet
 
 The `facet` function performs single and multi-dimension aggregations that behave in a similar manner to SQL group by aggregations.
-Under the covers the `facet` function pushes down the aggregations to Solr's <<json-facet-api.adoc#,JSON Facet API>> for fast distributed execution.
+Under the covers the `facet` function pushes down the aggregations to Solr's xref:json-facet-api.adoc[] for fast distributed execution.
 
 The example below performs a single dimension aggregation from the nyc311 (NYC complaints) dataset.
 The aggregation returns the top five *complaint types* by *count* for records with a status of *Pending*.
@@ -223,7 +223,7 @@ image::math-expressions/sterms.png[]
 === nodes
 
 The `nodes` function performs aggregations of nodes during a breadth first search of a graph.
-This function is covered in detail in the section <<graph-traversal.adoc#,Graph Traversal>>.
+This function is covered in detail in the section xref:graph-traversal.adoc[].
 In this example the focus will be on finding correlated nodes in a time series graph using the `nodes` expressions.
 
 The example below finds stock tickers whose daily movements tend to be correlated with the ticker *jpm* (JP Morgan).
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc b/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc
index 983cc7d..6650402 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/searching-nested-documents.adoc
@@ -19,7 +19,7 @@
 This section exposes potential techniques which can be used for searching deeply nested documents, showcasing how more complex queries can be constructed using some of Solr's query parsers and document transformers.
 
 These features require `\_root_` and `\_nest_path_` to be declared in the schema.
-Please refer to <<indexing-nested-documents.adoc#, Indexing Nested Documents>> for details about schema and index configuration.
+Please refer to xref:indexing-guide:indexing-nested-documents.adoc[] for details about schema and index configuration.
 
 [NOTE]
 This section does not demonstrate faceting on nested documents.
@@ -36,7 +36,7 @@ include::indexing-nested-documents.adoc[tag=sample-indexing-deeply-nested-docume
 By default, documents that match a query do not include any of their nested children in the response.
 The `[child]` Doc Transformer can be used to enrich query results with the documents' descendants.
 
-For a detailed explanation of this transformer, and specifics on it's syntax & limitations, please refer to the section <<document-transformers.adoc#child-childdoctransformerfactory, [child] - ChildDocTransformerFactory>>.
+For a detailed explanation of this transformer, and specifics on its syntax & limitations, please refer to the section xref:document-transformers.adoc#child-childdoctransformerfactory[\[child\] - ChildDocTransformerFactory].
 
 A simple query matching all documents with a description that includes "staplers":
 
@@ -105,7 +105,7 @@ $ curl 'http://localhost:8983/solr/gettingstarted/select?omitHeader=true&q=descr
 === Child Query Parser
 
 The `{!child}` query parser can be used to search for the _descendent_ documents of parent documents matching a wrapped query.
-For a detailed explanation of this parser, see the section <<block-join-query-parser.adoc#block-join-children-query-parser, Block Join Children Query Parser>>.
+For a detailed explanation of this parser, see the section xref:block-join-query-parser.adoc#block-join-children-query-parser[Block Join Children Query Parser].
 
 Let's consider again the `description_t:staplers` query used above -- if we wrap that query in a `{!child}` query parser then instead of "matching" & returning the product level documents, we instead match all of the _descendent_ child documents of the original query:
 
@@ -145,7 +145,7 @@ $ curl 'http://localhost:8983/solr/gettingstarted/select' -d 'omitHeader=true' -
   }}
 ----
 
-In this example we've used `\*:* -\_nest_path_:*` as our <<block-join-query-parser.adoc#block-mask,`of` parameter>> to indicate we want to consider all documents which don't have a nest path -- i.e., all "root" level document -- as the set of possible parents.
+In this example we've used `\*:* -\_nest_path_:*` as our xref:block-join-query-parser.adoc#block-mask[`of` parameter] to indicate we want to consider all documents which don't have a nest path -- i.e., all "root" level documents -- as the set of possible parents.
 
 By changing the `of` parameter to match ancestors at specific `\_nest_path_` levels, we can narrow down the list of children we return.
 In the query below, we search for all descendants of `skus` (using an `of` parameter that identifies all documents that do _not_ have a `\_nest_path_` with the prefix `/skus/*`) with a `price_i` less than `50`:
@@ -186,7 +186,7 @@ $ curl 'http://localhost:8983/solr/gettingstarted/select' -d 'omitHeader=true' -
 === Parent Query Parser
 
 The inverse of the `{!child}` query parser is the `{!parent}` query parser, which lets you search for the _ancestor_ documents of some child documents matching a wrapped query.
-For a detailed explanation of this parser, see the section <<block-join-query-parser.adoc#block-join-parent-query-parser,Block Join Parent Query Parser>>.
+For a detailed explanation of this parser, see the section xref:block-join-query-parser.adoc#block-join-parent-query-parser[Block Join Parent Query Parser].
 
 Let's first consider this example of searching for all "manual" type documents that have exactly `1` page:
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/simulations.adoc b/solr/solr-ref-guide/modules/query-guide/pages/simulations.adoc
index 711dd40..ca5915f 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/simulations.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/simulations.adoc
@@ -51,7 +51,7 @@ Then the `conv` (convolution) function is used to autocorrelate the `change_d` v
 Notice that the `conv` function is simply "convolving" the `change_d` vector
 with a reversed copy of itself.
 This is the technique for performing autocorrelation using convolution.
-The <<dsp.adoc#,Signal Processing>> section of the user guide covers both convolution and autocorrelation in detail.
+The xref:dsp.adoc[] section of the user guide covers both convolution and autocorrelation in detail.
 In this section we'll just discuss the plot.
 
 The plot shows the intensity of correlation that is calculated as the `change_d` vector is slid across itself by the `conv` function.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/spatial-search.adoc b/solr/solr-ref-guide/modules/query-guide/pages/spatial-search.adoc
index 243354f..78e79ef 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/spatial-search.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/spatial-search.adoc
@@ -126,7 +126,7 @@ A spatial indexed field.
 |Optional |Default: `none`
 |===
 +
-If the query is used in a scoring context (e.g., as the main query in `q`), this _<<local-params.adoc#,local param>>_ determines what scores will be produced.
+If the query is used in a scoring context (e.g., as the main query in `q`), this xref:local-params.adoc[_local param_] determines what scores will be produced.
 Advanced option; not supported by PointType.
 +
 Valid values are:
@@ -227,11 +227,11 @@ The only spatial types which stand to benefit from this technique are those with
 There are four distance function queries:
 
 * `geodist`, see below, usually the most appropriate;
-*  <<function-queries.adoc#dist-function,`dist`>>, to calculate the p-norm distance between multi-dimensional vectors;
-* <<function-queries.adoc#hsin-function,`hsin`>>, to calculate the distance between two points on a sphere;
-* <<function-queries.adoc#sqedist-function,`sqedist`>>, to calculate the squared Euclidean distance between two points.
+*  xref:function-queries.adoc#dist-function[`dist`], to calculate the p-norm distance between multi-dimensional vectors;
+* xref:function-queries.adoc#hsin-function[`hsin`], to calculate the distance between two points on a sphere;
+* xref:function-queries.adoc#sqedist-function[`sqedist`], to calculate the squared Euclidean distance between two points.
 
-For more information about these function queries, see the section on <<function-queries.adoc#,Function Queries>>.
+For more information about these function queries, see the section on xref:function-queries.adoc[].
 
 === geodist
 
@@ -270,7 +270,7 @@ There are other ways to do it too, like using a `{!geofilt}` in each facet.query
 
 === Boost Nearest Results
 
-Using the <<dismax-query-parser.adoc#,DisMax>> or <<edismax-query-parser.adoc#,Extended DisMax>>, you can combine spatial search with the boost function to boost the nearest results:
+Using the xref:dismax-query-parser.adoc[] or xref:edismax-query-parser.adoc[], you can combine spatial search with the boost function to boost the nearest results:
 
 [source,text]
 &q.alt=*:*&fq={!geofilt}&sfield=store&pt=45.15,-93.85&d=50&bf=recip(geodist(),2,200,20)&sort=score desc
@@ -564,7 +564,7 @@ Assuming you name your field "geom", you can configure an optional cache in `sol
 ----
 
 When using this field type, you will likely _not_ want to mark the field as stored because it's redundant with the DocValues data and surely larger because of the formatting (be it WKT or GeoJSON).
-To retrieve the spatial data in search results from DocValues, use the <<document-transformers.adoc#geo-geospatial-formatter,`[geo]` transformer>>.
+To retrieve the spatial data in search results from DocValues, use the xref:document-transformers.adoc#geo-geospatial-formatter[`[geo]` transformer].
 
 === Heatmap Faceting
 
@@ -574,7 +574,7 @@ The grid cells are determined at index-time based on RPT's configuration.
 At facet counting time, the indexed cells in the region of interest are traversed and a grid of counters corresponding to each cell are incremented.
 Solr can return the data in a straight-forward 2D array of integers or in a PNG which compresses better for larger data sets but must be decoded.
 
-The heatmap feature is accessible both from Solr's standard faceting feature, plus the newer more flexible <<json-facet-api.adoc#heatmap-facet,JSON Facet API>>.
+The heatmap feature is accessible both from Solr's standard faceting feature and the xref:json-facet-api.adoc#heatmap-facet[JSON Facet API].
 We'll proceed now with standard faceting.
 As a part of faceting, it supports the `key` local parameter as well as excluding tagged filter queries, just like other types of faceting do.
 This allows multiple heatmaps to be returned on the same field with different filters.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc b/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc
index 37789ea..40b4c63 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/spell-checking.adoc
@@ -173,7 +173,7 @@ The results are combined and collations can contain a mix of corrections from bo
 
 === Add It to a Request Handler
 
-Queries will be sent to a <<query-syntax-and-parsers.adoc#,RequestHandler>>.
+Queries will be sent to a xref:configuration-guide:request-handlers-and-search-components.adoc[request handler].
 If every request should generate a suggestion, then you would add the following to the `requestHandler` that you are using:
 
 [source,xml]
@@ -227,7 +227,7 @@ This parameter specifies the query to spellcheck.
 +
 If `spellcheck.q` is defined, then it is used; otherwise the original input query is used.
 The `spellcheck.q` parameter is intended to be the original query, minus any extra markup like field names, boosts, and so on.
-If the `q` parameter is specified, then the `SpellingQueryConverter` class is used to parse it into tokens; otherwise the <<tokenizers.adoc#white-space-tokenizer,`WhitespaceTokenizer`>> is used.
+If the `q` parameter is specified, then the `SpellingQueryConverter` class is used to parse it into tokens; otherwise the xref:indexing-guide:tokenizers.adoc#white-space-tokenizer[WhitespaceTokenizer] is used.
 +
 The choice of which one to use is up to the application.
 Essentially, if you have a spelling "ready" version in your application, then it is probably better to use `spellcheck.q`.
@@ -507,7 +507,7 @@ s|Required |Default: none
 |===
 +
 Specifies the shards in your distributed indexing configuration.
-For more information about distributed indexing, see <<user-managed-distributed-search.adoc#,User-Managed Distributed Search>>.
+For more information about distributed indexing, see xref:deployment-guide:cluster-types.adoc[].
 
 `shards.qt`::
 +
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc b/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc
index 7b9cfc1..7a6ef1c 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/sql-query.adoc
@@ -29,7 +29,7 @@ Both MapReduce style and JSON Facet API aggregations are supported, which means
 == SQL Architecture
 
 The SQL interface allows sending a SQL query to Solr and getting documents streamed back in response.
-Under the covers, Solr's SQL interface uses the Apache Calcite SQL engine to translate SQL queries to physical query plans implemented as <<streaming-expressions.adoc#,Streaming Expressions>>.
+Under the covers, Solr's SQL interface uses the Apache Calcite SQL engine to translate SQL queries to physical query plans implemented as xref:streaming-expressions.adoc[].
 
 More information about how Solr supports SQL queries is described in the <<Configuration>> section below.
 
@@ -252,7 +252,7 @@ SELECT distinct fieldA as fa, fieldB as fb FROM tableA ORDER BY fa desc, fb desc
 The SQL interface supports simple statistics calculated on numeric fields.
 The supported functions are `COUNT(*)`, `COUNT(DISTINCT field)`, `APPROX_COUNT_DISTINCT(field)`, `MIN`, `MAX`, `SUM`, and `AVG`.
 
-Because these functions never require data to be shuffled, the aggregations are pushed down into the search engine and are generated by the <<stats-component.adoc#,StatsComponent>>.
+Because these functions never require data to be shuffled, the aggregations are pushed down into the search engine and are generated by the xref:stats-component.adoc[].
 
 [source,sql]
 ----
@@ -356,7 +356,7 @@ By default, the `/sql` request handler is configured as an implicit handler, mea
 
 ==== Authorization for SQL Requests
 
-If your Solr cluster is configured to use the <<rule-based-authorization-plugin.adoc#,Rule-based Authorization Plugin>>,
+If your Solr cluster is configured to use the xref:rule-based-authorization-plugin.adoc[],
 then you need to grant `GET` and `POST` permissions on the `/sql`, `/select`, and `/export` endpoints for all collections you intend to execute SQL queries against.
 The `/select` endpoint is used for `LIMIT` queries, whereas the `/export` handler is used for queries without a `LIMIT`, so in most cases, you'll want to grant access to both.
 If you're using a worker collection for the `/sql` handler, then you only need to grant access to the `/sql` endpoint for the worker collection and not the collections in the data tier.
@@ -372,7 +372,7 @@ If you have high cardinality fields and a large amount of data, please be sure t
 === /stream and /export Request Handlers
 
 The Streaming API is an extensible parallel computing framework for SolrCloud.
-<<streaming-expressions.adoc#,Streaming Expressions>> provide a query language and a serialization format for the Streaming API.
+xref:streaming-expressions.adoc[] provide a query language and a serialization format for the Streaming API.
 
 The Streaming API provides support for fast MapReduce allowing it to perform parallel relational algebra on extremely large data sets.
 Under the covers the SQL interface parses SQL queries using the Apache Calcite SQL Parser.
@@ -542,11 +542,11 @@ The SQL interface supports queries sent from SQL clients and database visualizat
 
 This Guide contains documentation to configure the following tools and clients:
 
-* <<jdbc-zeppelin.adoc#,Apache Zeppelin>>
-* <<jdbc-dbvisualizer.adoc#,DbVisualizer>>
-* <<jdbc-squirrel.adoc#,SQuirreL SQL>>
-* <<jdbc-r.adoc#,R>>
-* <<jdbc-python-jython.adoc#,Python/Jython>>
+* xref:jdbc-zeppelin.adoc[]
+* xref:jdbc-dbvisualizer.adoc[]
+* xref:jdbc-squirrel.adoc[]
+* xref:jdbc-r.adoc[]
+* xref:jdbc-python-jython.adoc[]
 
 === Generic Clients
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/sql-screen.adoc b/solr/solr-ref-guide/modules/query-guide/pages/sql-screen.adoc
index d46c333..a088e37 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/sql-screen.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/sql-screen.adoc
@@ -23,4 +23,4 @@ In the example in the screenshot, a SQL query has been submitted, and the screen
 .Results of a SQL Query
 image::sql-screen/sql-query-ui.png[image,height=400]
 
-Learn more about by reading the <<sql-query.adoc#,SQL details>>, including the specific <<sql-query.adoc#solr-sql-syntax,SQL syntax>> supported by Solr.
+Learn more by reading the xref:sql-query.adoc[], including the specific xref:sql-query.adoc#solr-sql-syntax[SQL syntax] supported by Solr.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc b/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc
index e7effed..8aac0d2 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/standard-query-parser.adoc
@@ -19,11 +19,11 @@
 Solr's default Query Parser is also known as the "```lucene```" parser.
 
 The key advantage of the standard query parser is that it supports a robust and fairly intuitive syntax allowing you to create a variety of structured queries.
-The largest disadvantage is that it's very intolerant of syntax errors, as compared with something like the <<dismax-query-parser.adoc#,DisMax>> query parser which is designed to throw as few errors as possible.
+The largest disadvantage is that it's very intolerant of syntax errors, as compared with something like the xref:dismax-query-parser.adoc[] which is designed to throw as few errors as possible.
 
 == Standard Query Parser Parameters
 
-In addition to the <<common-query-parameters.adoc#,Common Query Parameters>>, <<faceting.adoc#,Faceting Parameters>>, <<highlighting.adoc#,Highlighting Parameters>>, and <<morelikethis.adoc#,MoreLikeThis Parameters>>, the standard query parser supports the parameters described in the table below.
+In addition to the xref:common-query-parameters.adoc[], xref:faceting.adoc[] Parameters, xref:highlighting.adoc[] Parameters, and xref:morelikethis.adoc[] Parameters, the standard query parser supports the parameters described in the table below.
 
 `q`::
 Defines a query using standard query syntax.
@@ -46,9 +46,9 @@ Default parameter values are specified in `solrconfig.xml`, or overridden by que
 == Standard Query Parser Response
 
 By default, the response from the standard query parser contains one `<result>` block, which is unnamed.
-If the <<common-query-parameters.adoc#debug-parameter,`debug` parameter>> is used, then an additional `<lst>` block will be returned, using the name "debug".
+If the xref:common-query-parameters.adoc#debug-parameter[`debug` parameter] is used, then an additional `<lst>` block will be returned, using the name "debug".
 This will contain useful debugging info, including the original query string, the parsed query string, and explain info for each document in the <result> block.
-If the <<common-query-parameters.adoc#explainother-parameter,`explainOther` parameter>> is also used, then additional explain info will be provided for all the documents matching that query.
+If the xref:common-query-parameters.adoc#explainother-parameter[`explainOther` parameter] is also used, then additional explain info will be provided for all the documents matching that query.
 
 === Sample Responses
 
@@ -253,7 +253,7 @@ Example:
 
 == Querying Specific Fields
 
-Data indexed in Solr is organized in fields, which are <<fields.adoc#,defined in the Solr Schema>>.
+Data indexed in Solr is organized in xref:indexing-guide:fields.adoc[fields], which are defined in xref:indexing-guide:schema-element.adoc[a schema].
 Searches can take advantage of fields to add precision to queries.
 For example, you can search for a term only in a specific field, such as a title field.
 
@@ -438,7 +438,8 @@ There is no limitation on the number of terms that match (as there was in past v
 
 === Specifying Dates and Times
 
-Queries against date based fields must use the <<date-formatting-math.adoc#,appropriate date formating>>.  Queries for exact date values will require quoting or escaping since `:` is the parser syntax used to denote a field query:
+Queries against date-based fields must use the xref:indexing-guide:date-formatting-math.adoc[appropriate date formatting].
+Queries for exact date values will require quoting or escaping since `:` is the parser syntax used to denote a field query:
 
 * `createdate:1976-03-06T23\:59\:59.999Z`
 * `createdate:"1976-03-06T23:59:59.999Z"`
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/stats-component.adoc b/solr/solr-ref-guide/modules/query-guide/pages/stats-component.adoc
index 4a36eff..3261475 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/stats-component.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/stats-component.adoc
@@ -48,7 +48,7 @@ s|Required |Default: none
 Specifies a field for which statistics should be generated.
 This parameter may be invoked multiple times in a query in order to request statistics on multiple fields.
 +
-<<local-params.adoc#,Local Params>> may be used to indicate a subset of the supported statistics should be computed, and/or that statistics should be computed over the results of an arbitrary numeric function (or query) instead of a simple field name.
+xref:local-params.adoc[] may be used to indicate a subset of the supported statistics should be computed, and/or that statistics should be computed over the results of an arbitrary numeric function (or query) instead of a simple field name.
 See the examples below.
 
 
@@ -165,7 +165,7 @@ This statistic is computed for all field types but is not computed by default.
 
 == Local Params with the Stats Component
 
-Similar to the <<faceting.adoc#,Facet Component>>, the `stats.field` parameter supports local params for:
+Similar to the xref:faceting.adoc[Facet Component], the `stats.field` parameter supports local params for:
 
 * Tagging & Excluding Filters: `stats.field={!ex=filterA}price`
 * Changing the Output Key: `stats.field={!key=my_price_stats}price`
@@ -227,4 +227,4 @@ http://localhost:8983/solr/techproducts/select?q=*:*&fq={!tag=stock_check}inStoc
 
 Sets of `stats.field` parameters can be referenced by `tag` when using Pivot Faceting to compute multiple statistics at every level (i.e., field) in the tree of pivot constraints.
 
-For more information and a detailed example, please see <<faceting.adoc#combining-stats-component-with-pivots,Combining Stats Component With Pivots>>.
+For more information and a detailed example, please see xref:faceting.adoc#combining-stats-component-with-pivots[Combining Stats Component With Pivots].
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/stream-api.adoc b/solr/solr-ref-guide/modules/query-guide/pages/stream-api.adoc
index 55cada0..a8e4b8f 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/stream-api.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/stream-api.adoc
@@ -63,7 +63,7 @@ http://localhost:8983/solr/gettingstarted/stream?action=PLUGINS
 [[list]]
 == LIST: List Daemon processes
 
-The <<stream-decorator-reference.adoc#daemon,daemon>> function allows you to wrap a streaming expression and run it at intervals to provide both continuous push and pull streaming.
+The xref:stream-decorator-reference.adoc#daemon[`daemon`] function allows you to wrap a streaming expression and run it at intervals to provide both continuous push and pull streaming.
 This command lists out all the currently running daemon processes.
 
 `/stream?action=LIST`
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc b/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc
index 0ec4f82..322c754 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/stream-decorator-reference.adoc
@@ -388,8 +388,8 @@ As you can see in the examples above, the `cartesianProduct` function does suppo
 == classify
 
 The `classify` function classifies tuples using a logistic regression text classification model.
-It was designed specifically to work with models trained using the <<stream-source-reference.adoc#train,train function>>.
-The `classify` function uses the <<stream-source-reference.adoc#model,model function>> to retrieve a stored model and then scores a stream of tuples using the model.
+It was designed specifically to work with models trained using the xref:stream-source-reference.adoc#train[`train` function].
+The `classify` function uses the xref:stream-source-reference.adoc#model[`model` function] to retrieve a stored model and then scores a stream of tuples using the model.
 The tuples read by the classifier must contain a text field that can be used for classification.
 The classify function uses a Lucene analyzer to extract the features from the text so the model can be applied.
 By default the `classify` function looks for the analyzer using the name of text field in the tuple.
@@ -528,7 +528,7 @@ daemon(id="uniqueId",
 The sample code above shows a `daemon` function wrapping an `update` function, which is wrapping a `topic` function.
 When this expression is sent to the `/stream` handler, the `/stream` handler sees the `daemon` function and keeps it in memory where it will run at intervals.
 In this particular example, the `daemon` function will run the `update` function every second.
-The `update` function is wrapping a <<stream-source-reference.adoc#topic,`topic` function>>, which will stream tuples that match the `topic` function query in batches.
+The `update` function is wrapping a xref:stream-source-reference.adoc#topic[`topic` function], which will stream tuples that match the `topic` function query in batches.
 Each subsequent call to the topic will return the next batch of tuples for the topic.
 The `update` function will send all the tuples matching the topic to another collection to be indexed.
 The `terminate` parameter tells the daemon to terminate when the `topic` function stops sending tuples.
@@ -539,7 +539,7 @@ Custom push functions can be plugged in that push documents out of Solr and into
 Push streaming can also be used for continuous background aggregation scenarios where aggregates are rolled up in the background at intervals and pushed to other Solr collections.
 Another use case is continuous background machine learning model optimization, where the optimized model is pushed to another Solr collection where it can be integrated into queries.
 
-The `/stream` handler supports a small <<stream-api.adoc#plugins,set of commands>> for listing and controlling daemon functions:
+The `/stream` handler supports a small xref:stream-api.adoc#plugins[set of commands] for listing and controlling daemon functions:
 
 [source,text]
 ----
@@ -630,7 +630,7 @@ daemonStream.close();
 
 == delete
 
-The `delete` function wraps other functions and uses the `id` and `\_version_` values found to send the tuples to a SolrCloud collection as <<indexing-with-update-handlers.adoc#delete-operations,Delete By Id>> commands.
+The `delete` function wraps other functions and uses the `id` and `\_version_` values found to send the tuples to a SolrCloud collection as xref:indexing-guide:indexing-with-update-handlers.adoc#delete-operations[Delete By Id] commands.
 
 This is similar to the `<<#update,update()>>` function described below.
 
@@ -660,7 +660,7 @@ The example above consumes the tuples returned by the `search` function against
 [NOTE]
 ====
 Unlike the `update()` function, `delete()` defaults to `pruneVersionField=false` -- preserving any `\_version_` values found in the inner stream when converting the tuples to "Delete By ID" requests.
-This ensures that using this stream will not (by default) result in deleting any documents that were updated _after_ the `search(...)` was executed, but _before_ the `delete(...)` processed that tuple (leveraging <<partial-document-updates.adoc#optimistic-concurrency,Optimistic concurrency>> constraints).
+This ensures that using this stream will not (by default) result in deleting any documents that were updated _after_ the `search(...)` was executed, but _before_ the `delete(...)` processed that tuple (leveraging xref:indexing-guide:partial-document-updates.adoc#optimistic-concurrency[optimistic concurrency] constraints).
 
 Users who wish to ignore concurrent updates and delete all matched documents should set `pruneVersionField=true` (or ensure that the inner stream tuples do not include any `\_version_` values).
 
@@ -723,7 +723,7 @@ daemon(id="myDaemon",
                       id="myTopic")))
 ----
 
-In the example above a <<daemon,daemon>> wraps an executor, which wraps a <<stream-source-reference.adoc#topic,topic>> that is returning tuples with expressions to execute.
+In the example above a <<daemon,`daemon`>> wraps an executor, which wraps a xref:stream-source-reference.adoc#topic[`topic`] that is returning tuples with expressions to execute.
 When sent to the stream handler, the daemon will call the executor at intervals which will cause the executor to read from the topic and execute the expressions found in the `expr_s` field.
 The daemon will repeatedly call the executor until all the tuples that match the topic have been iterated, then it will terminate.
 This is the approach for executing batches of streaming expressions from a `topic` queue.
@@ -1229,7 +1229,7 @@ The `priority` function is a simple priority scheduler for the <<executor>> func
 The `executor` function doesn't directly have a concept of task prioritization; instead it simply executes tasks in the order that they are read from its underlying stream.
 The `priority` function provides the ability to schedule a higher priority task ahead of lower priority tasks that were submitted earlier.
 
-The `priority` function wraps two <<stream-source-reference.adoc#topic,topics>> that are both emitting tuples that contain streaming expressions to execute.
+The `priority` function wraps two xref:stream-source-reference.adoc#topic[`topic` functions] that are both emitting tuples that contain streaming expressions to execute.
 The first topic is considered the higher priority task queue.
 
 Each time the `priority` function is called, it checks the higher priority task queue to see if there are any tasks to execute.
@@ -1334,7 +1334,7 @@ This allows the rollup function to rollup the over the `a_s` field, one group at
 
 == scoreNodes
 
-See section in <<graph-traversal.adoc#using-the-scorenodes-function-to-make-a-recommendation,graph traversal>>.
+See section in xref:graph-traversal.adoc#using-the-scorenodes-function-to-make-a-recommendation[graph traversal].
 
 == select
 
@@ -1483,4 +1483,4 @@ The `update` function wraps another functions and sends the tuples to a SolrClou
 The example above sends the tuples returned by the `search` function to the `destinationCollection` to be indexed.
 
 Wrapping `search(...)` as shown in this example is the common case usage of this decorator: to read documents from a collection as tuples, process or modify them in some way, and then add them back to a new collection.
-For this reason, `pruneVersionField=true` is the default behavior -- stripping any `\_version_` values found in the inner stream when converting the tuples to Solr documents to prevent any unexpected errors from <<partial-document-updates.adoc#optimistic-concurrency,Optimistic concurrency>> constraints.
+For this reason, `pruneVersionField=true` is the default behavior -- stripping any `\_version_` values found in the inner stream when converting the tuples to Solr documents to prevent any unexpected errors from xref:indexing-guide:partial-document-updates.adoc#optimistic-concurrency[optimistic concurrency] constraints.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/stream-evaluator-reference.adoc b/solr/solr-ref-guide/modules/query-guide/pages/stream-evaluator-reference.adoc
index 04877ed..5e3accf 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/stream-evaluator-reference.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/stream-evaluator-reference.adoc
@@ -101,7 +101,7 @@ add(fieldA,if(gt(fieldA,fieldB),fieldA,fieldB)) // if fieldA > fieldB then field
 == analyze
 
 The `analyze` function analyzes text using a Lucene/Solr analyzer and returns a list of tokens emitted by the analyzer.
-The `analyze` function can be called on its own or within the `<<stream-decorator-reference.adoc#select,select>>` and `<<stream-decorator-reference.adoc#cartesianproduct,cartesianProduct>>` streaming expressions.
+The `analyze` function can be called on its own or within the xref:stream-decorator-reference.adoc#select[`select`] and xref:stream-decorator-reference.adoc#cartesianproduct[`cartesianProduct`] streaming expressions.
 
 === analyze Parameters
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/stream-screen.adoc b/solr/solr-ref-guide/modules/query-guide/pages/stream-screen.adoc
index 5267c31..7fc2e48 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/stream-screen.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/stream-screen.adoc
@@ -16,8 +16,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-The Stream screen allows you to enter a <<streaming-expressions.adoc#,streaming expression>> and see the results.
-It is very similar to the <<query-screen.adoc#,Query Screen>>, except the input box is at the top and all options must be declared in the expression.
+The Stream screen allows you to enter a xref:streaming-expressions.adoc[] and see the results.
+It is very similar to the xref:query-screen.adoc[], except the input box is at the top and all options must be declared in the expression.
 
 The screen will insert everything up to the streaming expression itself, so you do not need to enter the full URI with the hostname, port, collection, etc.
 Simply input the expression after the `expr=` part, and the URL will be constructed dynamically as appropriate.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/stream-source-reference.adoc b/solr/solr-ref-guide/modules/query-guide/pages/stream-source-reference.adoc
index 9a9de29..cfaf4ac 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/stream-source-reference.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/stream-source-reference.adoc
@@ -27,7 +27,7 @@ By default, the `/select` handler is used.
 The `/select` handler can be used for simple rapid prototyping of expressions.
 For production, however, you will most likely want to use the `/export` handler which is designed to `sort` and `export` entire result sets.
 The `/export` handler is not used by default because it has stricter requirements than the `/select` handler so it's not as easy to get started working with.
-To read more about the `/export` handler requirements review the section <<exporting-result-sets.adoc#,Exporting Result Sets>>.
+To read more about the `/export` handler requirements review the section xref:exporting-result-sets.adoc[].
 
 === search Parameters
 
@@ -43,7 +43,7 @@ The default is `/select`.
 This parameter is only needed with the `/select` handler (which is the default) since the `/export` handler always returns all rows.
 * `partitionKeys`: Comma delimited list of keys to partition the search results by.
 To be used with the parallel function for parallelizing operations across worker nodes.
-See the <<stream-decorator-reference.adoc#parallel,parallel>> function for details.
+See the xref:stream-decorator-reference.adoc#parallel[parallel] function for details.
 
 === search Syntax
 
@@ -347,7 +347,7 @@ cat("authors.txt,fiction/scifi/", maxLines=500)
 == nodes
 
 The `nodes` function provides breadth-first graph traversal.
-For details, see the section <<graph-traversal.adoc#,Graph Traversal>>.
+For details, see the section xref:graph-traversal.adoc[].
 
 == knnSearch
 
@@ -384,7 +384,7 @@ knnSearch(collection1,
 
 The `model` function retrieves and caches logistic regression text classification models that are stored in a SolrCloud collection.
 The `model` function is designed to work with models that are created by the <<train,train function>>, but can also be used to retrieve text classification models trained outside of Solr, as long as they conform to the specified format.
-After the model is retrieved it can be used by the <<stream-decorator-reference.adoc#classify,classify function>> to classify documents.
+After the model is retrieved it can be used by the xref:stream-decorator-reference.adoc#classify[classify function] to classify documents.
 
 A single model tuple is fetched and returned based on the *id* parameter.
 The model is retrieved by matching the *id* parameter with a model name in the index.
@@ -558,7 +558,7 @@ When used in parallel mode the partitionKeys parameter must be provided.
 * `zkHost`: Only needs to be defined if the collection being searched is found in a different zkHost than the local stream handler.
 * `partitionKeys`: Comma delimited list of keys to partition the search results by.
 To be used with the parallel function for parallelizing operations across worker nodes.
-See the <<stream-decorator-reference.adoc#parallel,parallel>> function for details.
+See the xref:stream-decorator-reference.adoc#parallel[parallel] function for details.
 
 === shuffle Syntax
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/streaming-expressions.adoc b/solr/solr-ref-guide/modules/query-guide/pages/streaming-expressions.adoc
index 0d5dc49..f61bca2 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/streaming-expressions.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/streaming-expressions.adoc
@@ -43,7 +43,7 @@ Streaming expressions are comprised of streaming functions which work with a Sol
 They emit a stream of tuples (key/value Maps).
 
 Some of the provided streaming functions are designed to work with entire result sets rather than the top N results like normal search.
-This is supported by the <<exporting-result-sets.adoc#,/export handler>>.
+This is supported by the xref:exporting-result-sets.adoc[/export handler].
 
 Some streaming functions act as stream sources to originate the stream flow.
 Other streaming functions act as stream decorators to wrap other stream functions and perform operations on the stream of tuples.
@@ -53,7 +53,7 @@ This can be particularly powerful for relational algebra functions.
 === Streaming Requests and Responses
 
 Solr has a `/stream` request handler that takes streaming expression requests and returns the tuples as a JSON stream.
-This request handler is implicitly defined, meaning there is nothing that has to be defined in `solrconfig.xml` - see <<implicit-requesthandlers.adoc#,Implicit Request Handlers>>.
+This request handler is implicitly defined, meaning there is nothing that has to be defined in `solrconfig.xml` - see xref:configuration-guide:implicit-requesthandlers.adoc[].
 
 The `/stream` request handler takes one parameter, `expr`, which is used to specify the streaming expression.
 For example, this curl command encodes and POSTs a simple `search()` expression to the `/stream` handler:
@@ -107,14 +107,14 @@ Timeouts for Streaming Expressions can be configured with the `socketTimeout` an
 Stream sources originate streams.
 There are rich set of searching, sampling and aggregation stream sources to choose from.
 
-A full reference to all available source expressions is available in <<stream-source-reference.adoc#,Stream Source Reference>>.
+A full reference to all available source expressions is available in xref:stream-source-reference.adoc[].
 
 
 === Stream Decorators
 
 Stream decorators wrap stream sources and other stream decorators to transform a stream.
 
-A full reference to all available decorator expressions is available in <<stream-decorator-reference.adoc#,Stream Decorator Reference>>.
+A full reference to all available decorator expressions is available in xref:stream-decorator-reference.adoc[].
 
 === Math Expressions
 
@@ -129,17 +129,17 @@ The math expressions user guide is available in <<>>
 
 From a language standpoint math expressions are referred to as *stream evaluators*.
 
-A full reference to all available evaluator expressions is available in <<stream-evaluator-reference.adoc#,Stream Evaluator Reference>>.
+A full reference to all available evaluator expressions is available in xref:stream-evaluator-reference.adoc[].
 
 === Visualization
 
 
 Visualization of both streaming expressions and math expressions is done using Apache Zeppelin and the Zeppelin-Solr Interpreter.
 
-Visualizing Streaming expressions and setting up of Apache Zeppelin is documented in <<math-start.adoc#zeppelin-solr-interpreter,Zeppelin-Solr Interpreter>>.
+Visualizing Streaming expressions and setting up of Apache Zeppelin is documented in xref:math-start.adoc#zeppelin-solr-interpreter[Zeppelin-Solr Interpreter].
 
-The <<math-expressions.adoc#,Math Expressions User Guide>> has in depth coverage of visualization techniques.
+The xref:math-expressions.adoc[] has in depth coverage of visualization techniques.
 
 === Stream Screen
 
-* <<stream-screen.adoc#,Stream Screen>>: Submit streaming expressions and see results and parsing explanations.
+* xref:stream-screen.adoc[]: Submit streaming expressions and see results and parsing explanations.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/suggester.adoc b/solr/solr-ref-guide/modules/query-guide/pages/suggester.adoc
index e21888d..3907ecf 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/suggester.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/suggester.adoc
@@ -20,7 +20,7 @@ The SuggestComponent in Solr provides users with automatic suggestions for query
 
 You can use this to implement a powerful auto-suggest feature in your search application.
 
-Although it is possible to use the <<spell-checking.adoc#,Spell Checking>> functionality to power autosuggest behavior, Solr has a dedicated {solr-javadocs}/core/org/apache/solr/handler/component/SuggestComponent.html[SuggestComponent] designed for this functionality.
+Although it is possible to use the xref:spell-checking.adoc[] functionality to power autosuggest behavior, Solr has a dedicated {solr-javadocs}/core/org/apache/solr/handler/component/SuggestComponent.html[SuggestComponent] designed for this functionality.
 
 This approach utilizes Lucene's Suggester implementation and supports all of the lookup implementations available in Lucene.
 
@@ -31,7 +31,7 @@ The main features of this Suggester are:
 * Distributed support
 
 The `solrconfig.xml` found in Solr's "techproducts" example has a Suggester implementation configured already.
-For more on search components, see the section <<requesthandlers-searchcomponents.adoc#,Request Handlers and Search Components>>.
+For more on search components, see the section xref:configuration-guide:requesthandlers-searchcomponents.adoc[].
 
 The "techproducts" example `solrconfig.xml` has a `suggest` search component and a `/suggest` request handler already configured.
 You can use that as the basis for your configuration, or create it from scratch, as detailed below.
@@ -130,7 +130,7 @@ A field from the index to use as the basis of suggestion terms.
 If `sourceLocation` is empty (meaning any dictionary implementation other than `FileDictionaryFactory`), then terms from this field in the index will be used.
 +
 To be used as the basis for a suggestion, the field must be stored.
-You may want to <<copy-fields.adoc#,use copyField rules>> to create a special 'suggest' field comprised of terms from other fields in documents.
+You may want to xref:indexing-guide:copy-fields.adoc[use copyField rules] to create a special 'suggest' field comprised of terms from other fields in documents.
 +
 You usually want a minimal amount of analysis on the field (no stemming, no synonyms, etc.), so an option is to create a field type in your schema that only uses basic tokenizers or filters.
 An example of such a field type is shown here:
@@ -689,7 +689,7 @@ See the <<Example Usages>> section below for an example request and response.
 == Adding the Suggest Request Handler
 
 After adding the search component, a request handler must be added to `solrconfig.xml`.
-This request handler works the <<requesthandlers-searchcomponents.adoc#,same as any other request handler>>, and allows you to configure default parameters for serving suggestion requests.
+This request handler works the xref:configuration-guide:requesthandlers-searchcomponents.adoc[same as any other request handler], and allows you to configure default parameters for serving suggestion requests.
 The request handler definition must incorporate the "suggest" search component defined previously.
 
 [source,xml]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/tagger-handler.adoc b/solr/solr-ref-guide/modules/query-guide/pages/tagger-handler.adoc
index ca9d4a2..3676e53 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/tagger-handler.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/tagger-handler.adoc
@@ -39,7 +39,7 @@ be supported; the maximum is mostly limited only by memory.
 
 To configure the tagger, your Solr schema needs 2 fields:
 
-* A unique key field (see <<schema-elements.adoc#unique-key,Unique Key>> for how to define a unique key in your schema).
+* A unique key field (see xref:indexing-guide:schema-elements.adoc#unique-key[Unique Key] for how to define a unique key in your schema).
   Recommended field settings: set `docValues=true`.
 * A tag field, which must be a `TextField`, with `ConcatenateGraphFilterFactory` at the end of the index chain (not the query chain):
   Set `preservePositionIncrements=false` on that filter.
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/term-vector-component.adoc b/solr/solr-ref-guide/modules/query-guide/pages/term-vector-component.adoc
index c2eb593..cd11b86 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/term-vector-component.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/term-vector-component.adoc
@@ -230,7 +230,7 @@ This can be computationally expensive.
 
 To see an example of TermVector component output, see the Wiki page: https://cwiki.apache.org/confluence/display/solr/TermVectorComponentExampleOptions
 
-For schema requirements, see also the section  <<field-properties-by-use-case.adoc#, Field Properties by Use Case>>.
+For schema requirements, see also the section xref:indexing-guide:field-properties-by-use-case.adoc[].
 
 == SolrJ and the Term Vector Component
 
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/terms-component.adoc b/solr/solr-ref-guide/modules/query-guide/pages/terms-component.adoc
index 897891c..f69cb02 100644
--- a/solr/solr-ref-guide/modules/query-guide/pages/terms-component.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/pages/terms-component.adoc
@@ -25,7 +25,7 @@ The document frequencies returned are the number of documents that match the ter
 
 == Configuring the Terms Component
 
-Terms Component is one of  <<requesthandlers-searchcomponents.adoc#defining-search-components,the default search components>>
+Terms Component is one of xref:configuration-guide:requesthandlers-searchcomponents.adoc#defining-search-components[the default search components]
 and does not need to be defined in `solrconfig.xml`.
 
 The definition is equivalent to:
@@ -37,7 +37,7 @@ The definition is equivalent to:
 
 === Using the Terms Component in a Request Handler
 
-Solr comes with an <<implicit-requesthandlers.adoc#query-handlers,Implicit RequestHandler>> definition `/terms`, which enables (only) Terms component.
+Solr comes with an xref:configuration-guide:implicit-requesthandlers.adoc#query-handlers[implicit request handler] definition `/terms`, which enables (only) Terms component.
 
 If you want to enable Terms component when using another Request Handler, `terms=true` parameter needs to be passed during the request or be set in the handler's defaults.
 
@@ -425,7 +425,7 @@ Results (notice that the term counts are not affected by the query):
 
 == Using the Terms Component for an Auto-Suggest Feature
 
-If the <<suggester.adoc#,Suggester>> doesn't suit your needs, you can use the Terms component in Solr to build a similar feature for your own search application.
+If the xref:suggester.adoc[] doesn't suit your needs, you can use the Terms component in Solr to build a similar feature for your own search application.
 Simply submit a query specifying whatever characters the user has typed so far as a prefix.
 For example, if the user has typed "at", the search engine's interface would submit the following query:
 
diff --git a/solr/solr-ref-guide/modules/query-guide/querying-nav.adoc b/solr/solr-ref-guide/modules/query-guide/querying-nav.adoc
index 6fb26ec..07dd59b 100644
--- a/solr/solr-ref-guide/modules/query-guide/querying-nav.adoc
+++ b/solr/solr-ref-guide/modules/query-guide/querying-nav.adoc
@@ -1,85 +1,84 @@
 .Query Guide
-* xref:query-guide.adoc[]
 
-** xref:query-syntax-and-parsers.adoc[]
-*** xref:common-query-parameters.adoc[]
-*** xref:standard-query-parser.adoc[]
-*** xref:dismax-query-parser.adoc[]
-*** xref:edismax-query-parser.adoc[]
-*** xref:function-queries.adoc[]
-*** xref:local-params.adoc[]
-*** xref:json-request-api.adoc[]
-**** xref:json-query-dsl.adoc[]
-*** xref:searching-nested-documents.adoc[]
-*** xref:block-join-query-parser.adoc[]
-*** xref:join-query-parser.adoc[]
-*** xref:spatial-search.adoc[]
-*** xref:other-parsers.adoc[]
-*** xref:sql-query.adoc[]
-**** xref:jdbc-dbvisualizer.adoc[]
-**** xref:jdbc-squirrel.adoc[]
-**** xref:jdbc-zeppelin.adoc[]
-**** xref:jdbc-python-jython.adoc[]
-**** xref:jdbc-r.adoc[]
-*** xref:query-screen.adoc[]
-*** xref:sql-screen.adoc[]
+* xref:query-syntax-and-parsers.adoc[]
+** xref:common-query-parameters.adoc[]
+** xref:standard-query-parser.adoc[]
+** xref:dismax-query-parser.adoc[]
+** xref:edismax-query-parser.adoc[]
+** xref:function-queries.adoc[]
+** xref:local-params.adoc[]
+** xref:json-request-api.adoc[]
+*** xref:json-query-dsl.adoc[]
+** xref:searching-nested-documents.adoc[]
+** xref:block-join-query-parser.adoc[]
+** xref:join-query-parser.adoc[]
+** xref:spatial-search.adoc[]
+** xref:other-parsers.adoc[]
+** xref:sql-query.adoc[]
+*** xref:jdbc-dbvisualizer.adoc[]
+*** xref:jdbc-squirrel.adoc[]
+*** xref:jdbc-zeppelin.adoc[]
+*** xref:jdbc-python-jython.adoc[]
+*** xref:jdbc-r.adoc[]
+** xref:query-screen.adoc[]
+** xref:sql-screen.adoc[]
 
-** xref:enhancing-queries.adoc[]
-*** xref:spell-checking.adoc[]
-*** xref:suggester.adoc[]
-*** xref:morelikethis.adoc[]
-*** xref:query-re-ranking.adoc[]
-*** xref:learning-to-rank.adoc[]
-*** xref:tagger-handler.adoc[]
-*** xref:analytics.adoc[]
-**** xref:analytics-expression-sources.adoc[]
-**** xref:analytics-mapping-functions.adoc[]
-**** xref:analytics-reduction-functions.adoc[]
-*** xref:terms-component.adoc[]
-*** xref:term-vector-component.adoc[]
-*** xref:stats-component.adoc[]
+* Enhancing Queries
+** xref:spell-checking.adoc[]
+** xref:suggester.adoc[]
+** xref:morelikethis.adoc[]
+** xref:query-re-ranking.adoc[]
+** xref:learning-to-rank.adoc[]
+** xref:tagger-handler.adoc[]
+** xref:analytics.adoc[]
+*** xref:analytics-expression-sources.adoc[]
+*** xref:analytics-mapping-functions.adoc[]
+*** xref:analytics-reduction-functions.adoc[]
+** xref:terms-component.adoc[]
+** xref:term-vector-component.adoc[]
+** xref:stats-component.adoc[]
 
-** xref:controlling-results.adoc[]
-*** xref:faceting.adoc[]
-*** xref:json-facet-api.adoc[]
-**** xref:json-faceting-domain-changes.adoc[]
-*** xref:collapse-and-expand-results.adoc[]
-*** xref:result-grouping.adoc[]
-*** xref:result-clustering.adoc[]
-*** xref:highlighting.adoc[]
-*** xref:query-elevation-component.adoc[]
-*** xref:document-transformers.adoc[]
-*** xref:response-writers.adoc[]
-*** xref:exporting-result-sets.adoc[]
-*** xref:pagination-of-results.adoc[]
+* Controlling Results
+** xref:faceting.adoc[]
+** xref:json-facet-api.adoc[]
+*** xref:json-faceting-domain-changes.adoc[]
+** xref:collapse-and-expand-results.adoc[]
+** xref:result-grouping.adoc[]
+** xref:result-clustering.adoc[]
+** xref:highlighting.adoc[]
+** xref:query-elevation-component.adoc[]
+** xref:document-transformers.adoc[]
+** xref:response-writers.adoc[]
+** xref:exporting-result-sets.adoc[]
+** xref:pagination-of-results.adoc[]
 
-** xref:streaming-expressions.adoc[]
-*** xref:stream-source-reference.adoc[]
-*** xref:stream-decorator-reference.adoc[]
-*** xref:stream-evaluator-reference.adoc[]
-*** xref:math-expressions.adoc[]
-**** xref:visualization.adoc[]
-**** xref:math-start.adoc[]
-**** xref:loading.adoc[]
-**** xref:search-sample.adoc[]
-**** xref:transform.adoc[]
-**** xref:scalar-math.adoc[]
-**** xref:vector-math.adoc[]
-**** xref:variables.adoc[]
-**** xref:matrix-math.adoc[]
-**** xref:term-vectors.adoc[]
-**** xref:probability-distributions.adoc[]
-**** xref:statistics.adoc[]
-**** xref:regression.adoc[]
-**** xref:curve-fitting.adoc[]
-**** xref:time-series.adoc[]
-**** xref:numerical-analysis.adoc[]
-**** xref:dsp.adoc[]
-**** xref:simulations.adoc[]
-**** xref:machine-learning.adoc[]
-**** xref:graph.adoc[]
-**** xref:computational-geometry.adoc[]
-**** xref:logs.adoc[]
-*** xref:graph-traversal.adoc[]
-*** xref:stream-api.adoc[]
-*** xref:stream-screen.adoc[]
+* xref:streaming-expressions.adoc[]
+** xref:stream-source-reference.adoc[]
+** xref:stream-decorator-reference.adoc[]
+** xref:stream-evaluator-reference.adoc[]
+** xref:math-expressions.adoc[]
+*** xref:visualization.adoc[]
+*** xref:math-start.adoc[]
+*** xref:loading.adoc[]
+*** xref:search-sample.adoc[]
+*** xref:transform.adoc[]
+*** xref:scalar-math.adoc[]
+*** xref:vector-math.adoc[]
+*** xref:variables.adoc[]
+*** xref:matrix-math.adoc[]
+*** xref:term-vectors.adoc[]
+*** xref:probability-distributions.adoc[]
+*** xref:statistics.adoc[]
+*** xref:regression.adoc[]
+*** xref:curve-fitting.adoc[]
+*** xref:time-series.adoc[]
+*** xref:numerical-analysis.adoc[]
+*** xref:dsp.adoc[]
+*** xref:simulations.adoc[]
+*** xref:machine-learning.adoc[]
+*** xref:graph.adoc[]
+*** xref:computational-geometry.adoc[]
+*** xref:logs.adoc[]
+** xref:graph-traversal.adoc[]
+** xref:stream-api.adoc[]
+** xref:stream-screen.adoc[]
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/controlling-results.adoc b/solr/solr-ref-guide/src/old-pages/controlling-results.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/query-guide/pages/controlling-results.adoc
rename to solr/solr-ref-guide/src/old-pages/controlling-results.adoc
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/enhancing-queries.adoc b/solr/solr-ref-guide/src/old-pages/enhancing-queries.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/query-guide/pages/enhancing-queries.adoc
rename to solr/solr-ref-guide/src/old-pages/enhancing-queries.adoc
diff --git a/solr/solr-ref-guide/modules/query-guide/pages/query-guide.adoc b/solr/solr-ref-guide/src/old-pages/query-guide.adoc
similarity index 100%
rename from solr/solr-ref-guide/modules/query-guide/pages/query-guide.adoc
rename to solr/solr-ref-guide/src/old-pages/query-guide.adoc