You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ho...@apache.org on 2017/05/09 04:36:45 UTC

[1/4] lucene-solr:jira/solr-10290: SOLR-10640: make CheckLinksAndAnchors smarter

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-10290 db902f5c9 -> 4a0b9b539


SOLR-10640: make CheckLinksAndAnchors smarter

now checks for relative links to files/anchors that don't exist

in the process of adding that, I realize the duplicate ID checking wasn't accounting for the 'implicit' page.shortname ids that are used in the PDF - so some dups were getting overlooked

example: suggester.adoc implicitly has a 'suggester' id, but glossary.adoc might explicitly define a '[[suggester]]' anchor


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/83b966e2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/83b966e2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/83b966e2

Branch: refs/heads/jira/solr-10290
Commit: 83b966e2cab5427e6afb2b4e468d4b3e833494a5
Parents: db902f5
Author: Chris Hostetter <ho...@apache.org>
Authored: Mon May 8 18:38:12 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Mon May 8 18:38:12 2017 -0700

----------------------------------------------------------------------
 .../tools/CheckLinksAndAnchors.java             | 77 +++++++++++++++++---
 1 file changed, 65 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/83b966e2/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java b/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java
index 6ed7aa4..9d9daeb 100644
--- a/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java
+++ b/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java
@@ -38,11 +38,10 @@ import org.jsoup.select.NodeVisitor;
  * </p>
  * <p>
  * This tool parses the generated HTML site, looking for these situations in order to fail the build -- since the 
- * equivilent PDF will be broken
+ * equivilent PDF will be broken.  It also does sme general check of the relative URLs to ensure the destination 
+ * files/anchors actaully exist.
  * </p>
  * 
- * TODO: This class could also generally check that no (relative) links are broken?
- *
  * @see https://github.com/asciidoctor/asciidoctor/issues/1865
  * @see https://github.com/asciidoctor/asciidoctor/issues/1866
  */
@@ -69,11 +68,16 @@ public class CheckLinksAndAnchors {
       System.exit(-1);
     }
 
-    final Map<String,List<File>> knownIds = new HashMap<>();
-    final Set<String> problemIds = new HashSet<>(0);
+    final Map<String,List<File>> idsToFiles = new HashMap<>();
+    final Map<File,List<URI>> filesToRelativeLinks = new HashMap<>();
+    final Set<String> idsInMultiFiles = new HashSet<>(0);
     
     for (File file : pages) {
       //System.out.println("input File URI: " + file.toURI().toString());
+
+      assert ! filesToRelativeLinks.containsKey(file);
+      final List<URI> linksInThisFile = new ArrayList<URI>(17);
+      filesToRelativeLinks.put(file, linksInThisFile);
       
       final String fileContents = readFile(file.getPath());
       final Document doc = Jsoup.parse(fileContents);
@@ -82,7 +86,7 @@ public class CheckLinksAndAnchors {
         throw new RuntimeException(file.getName() + " has no main-content div");
       }
 
-      // Add all of the IDs in this doc to knownIds (and problemIds if needed)
+      // Add all of the IDs in this doc to idsToFiles (and idsInMultiFiles if needed)
       final Elements nodesWithIds = mainContent.select("[id]");
       for (Element node : nodesWithIds) {
         final String id = node.id();
@@ -94,13 +98,32 @@ public class CheckLinksAndAnchors {
           continue;
         }
         
-        if (knownIds.containsKey(id)) {
-          problemIds.add(id);
+        if (idsToFiles.containsKey(id)) {
+          idsInMultiFiles.add(id);
         } else {
-          knownIds.put(id, new ArrayList<File>(1));
+          idsToFiles.put(id, new ArrayList<File>(1));
         }
-        knownIds.get(id).add(file);
+        idsToFiles.get(id).add(file);
       }
+
+      {
+        // special case: implicitly assume each file contains an id matching it's filename
+        // since that's the convention used in linking - for the HTML links these ID's don't
+        // exist but we don't care since #frags pointed at non-existend IDs are ignored br browsers.
+        // in the PDF these *will* exist and we need to ensure there won't be any dups / misdirected links
+        // in that case.
+        final String id = file.getName().substring(0, file.getName().lastIndexOf("."));
+        if (0 == mainContent.select("[id=\""+id+"\"]").size()) {
+          if (idsToFiles.containsKey(id)) {
+            idsInMultiFiles.add(id);
+          } else {
+            idsToFiles.put(id, new ArrayList<File>(1));
+          }
+          idsToFiles.get(id).add(file);
+        }
+      }
+          
+      
       
       // check for (relative) links that don't include a fragment
       final Elements links = mainContent.select("a[href]");
@@ -118,6 +141,9 @@ public class CheckLinksAndAnchors {
               // we must have a fragment for intra-page links to work correctly
               problems++;
               System.err.println(file.toURI().toString() + " contains relative link w/o an '#anchor': " + href);
+            } else {
+              // track the link to validate it exists in the target doc
+              linksInThisFile.add(uri);
             }
           }
         } catch (URISyntaxException uri_ex) {
@@ -140,14 +166,41 @@ public class CheckLinksAndAnchors {
     }
 
     // if there are problematic ids, report them
-    for (String id : problemIds) {
+    for (String id : idsInMultiFiles) {
       problems++;
       System.err.println("ID occurs multiple times: " + id);
-      for (File file : knownIds.get(id)) {
+      for (File file : idsToFiles.get(id)) {
         System.err.println(" ... " + file.toURI().toString());
       }
     }
 
+    // check every (realtive) link in every file to ensure the frag exists in the target page
+    for (Map.Entry<File,List<URI>> entry : filesToRelativeLinks.entrySet()) {
+      final File source = entry.getKey();
+      for (URI link : entry.getValue()) {
+        final String path = (null == link.getPath() || "".equals(link.getPath())) ? source.getName() : link.getPath();
+        final String frag = link.getFragment();
+        if ( ! idsInMultiFiles.contains(frag) ) { // skip problematic dups already reported
+          final File dest = new File(htmlDir, path);
+          if ( ! dest.exists() ) {
+            problems++;
+            System.err.println("Relative link points at dest file that doesn't exist: " + link);
+            System.err.println(" ... source: " + source.toURI().toString());
+          } else if ( ( ! idsToFiles.containsKey(frag) ) || // no file contains this id, or...
+                      // id exists, but not in linked file
+                      ( ! idsToFiles.get(frag).get(0).getName().equals(path) )) { 
+            problems++;
+            System.err.println("Relative link points at id that doesn't exist in dest: " + link);
+            System.err.println(" ... source: " + source.toURI().toString());
+            try {
+              System.err.println(" ... nocommit: " + idsToFiles.get(frag).get(0).getName());
+              System.err.println(" ... nocommit: " + path);
+            } catch (NullPointerException ignored) { /* nocommit: Noop */ }
+          }
+        }
+      }
+    }
+
     
     if (0 < problems) {
       System.err.println("Total of " + problems + " problems found");


[2/4] lucene-solr:jira/solr-10290: Fix dup anchors found by improved check-links-and-anchors

Posted by ho...@apache.org.
Fix dup anchors found by improved check-links-and-anchors


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/53f38012
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/53f38012
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/53f38012

Branch: refs/heads/jira/solr-10290
Commit: 53f380125d022c99fa9f1200dc433b5f13f6ff87
Parents: 83b966e
Author: Chris Hostetter <ho...@apache.org>
Authored: Mon May 8 20:29:26 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Mon May 8 20:29:26 2017 -0700

----------------------------------------------------------------------
 .../src/major-changes-from-solr-5-to-solr-6.adoc               | 3 +++
 solr/solr-ref-guide/src/solr-glossary.adoc                     | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53f38012/solr/solr-ref-guide/src/major-changes-from-solr-5-to-solr-6.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/major-changes-from-solr-5-to-solr-6.adoc b/solr/solr-ref-guide/src/major-changes-from-solr-5-to-solr-6.adoc
index 3ae6f6c..1f06131 100644
--- a/solr/solr-ref-guide/src/major-changes-from-solr-5-to-solr-6.adoc
+++ b/solr/solr-ref-guide/src/major-changes-from-solr-5-to-solr-6.adoc
@@ -8,6 +8,7 @@ There are some major changes in Solr 6 to consider before starting to migrate yo
 
 Some of the major improvements in Solr 6 include:
 
+[[major-5-6-streaming]]
 === Streaming Expressions
 
 Introduced in Solr 5, <<streaming-expressions.adoc#streaming-expressions,Streaming Expressions>> allow querying Solr and getting results as a stream of data, sorted and aggregated as requested.
@@ -20,6 +21,7 @@ Several new expression types have been added in Solr 6:
 * Publish/Subscribe messaging.
 * JDBC connections to pull data from other systems and join with documents in the Solr index.
 
+[[major-5-6-parallel-sql]]
 === Parallel SQL Interface
 
 Built on streaming expressions, new in Solr 6 is a <<parallel-sql-interface.adoc#parallel-sql-interface,Parallel SQL interface>> to be able to send SQL queries to Solr. SQL statements are compiled to streaming expressions on the fly, providing the full range of aggregations available to streaming expression requests. A JDBC driver is included, which allows using SQL clients and database visualization tools to query your Solr index and import data to other systems.
@@ -32,6 +34,7 @@ Replication across data centers is now possible with <<cross-data-center-replica
 
 A new <<other-parsers.adoc#OtherParsers-GraphQueryParser,`graph` query parser>> makes it possible to to graph traversal queries of Directed (Cyclic) Graphs modelled using Solr documents.
 
+[[major-5-6-docvalues]]
 === DocValues
 
 Most non-text field types in the Solr sample configsets now default to using <<docvalues.adoc#docvalues,DocValues>>.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53f38012/solr/solr-ref-guide/src/solr-glossary.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solr-glossary.adoc b/solr/solr-ref-guide/src/solr-glossary.adoc
index 27c7918..d95ffb9 100644
--- a/solr/solr-ref-guide/src/solr-glossary.adoc
+++ b/solr/solr-ref-guide/src/solr-glossary.adoc
@@ -112,7 +112,7 @@ A single node in <<solrclouddef,SolrCloud>> that is responsible for processing a
 [[SolrGlossary-Q]]
 == Q
 
-[[query parser]]Query parser::
+[[query-parser]]Query parser::
 A query parser processes the terms entered by a user.
 
 [[SolrGlossary-R]]
@@ -121,7 +121,7 @@ A query parser processes the terms entered by a user.
 [[recall]]Recall::
 The ability of a search engine to retrieve _all_ of the possible matches to a user's query.
 
-[[relevance]]Relevance::
+[[relevancedef]]Relevance::
 The appropriateness of a document to the search conducted by the user.
 
 [[replica]]Replica::
@@ -159,7 +159,7 @@ The ability to suggest alternative spellings of search terms to a user, as a che
 [[stopwords]]Stopwords::
 Generally, words that have little meaning to a user's search but which may have been entered as part of a <<Naturallanguagequery,natural language>> query. Stopwords are generally very small pronouns, conjunctions and prepositions (such as, "the", "with", or "and")
 
-[[suggester]]<<suggester.adoc#suggester,Suggester>>::
+[[suggesterdef]]<<suggester.adoc#suggester,Suggester>>::
 Functionality in Solr that provides the ability to suggest possible query terms to users as they type.
 
 [[synonyms]]Synonyms::


[4/4] lucene-solr:jira/solr-10290: batch remove OLD_CONFLUENCE_ID comments now that all inter-page links have been fixed

Posted by ho...@apache.org.
batch remove OLD_CONFLUENCE_ID comments now that all inter-page links have been fixed


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4a0b9b53
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4a0b9b53
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4a0b9b53

Branch: refs/heads/jira/solr-10290
Commit: 4a0b9b5395f07961ded6ed9e041a8772de690b24
Parents: 10152ba
Author: Chris Hostetter <ho...@apache.org>
Authored: Mon May 8 21:36:34 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Mon May 8 21:36:34 2017 -0700

----------------------------------------------------------------------
 solr/solr-ref-guide/src/collections-api.adoc              |  1 -
 .../src/cross-data-center-replication-cdcr-.adoc          |  6 ------
 solr/solr-ref-guide/src/faceting.adoc                     |  1 -
 solr/solr-ref-guide/src/other-schema-elements.adoc        |  1 -
 solr/solr-ref-guide/src/phonetic-matching.adoc            |  1 -
 solr/solr-ref-guide/src/rule-based-replica-placement.adoc |  3 ---
 solr/solr-ref-guide/src/running-solr-on-hdfs.adoc         |  1 -
 .../solr-ref-guide/src/solr-control-script-reference.adoc |  5 -----
 solr/solr-ref-guide/src/spatial-search.adoc               |  2 --
 solr/solr-ref-guide/src/spell-checking.adoc               |  2 --
 solr/solr-ref-guide/src/streaming-expressions.adoc        |  1 -
 solr/solr-ref-guide/src/the-dismax-query-parser.adoc      |  8 --------
 .../src/the-extended-dismax-query-parser.adoc             |  2 --
 solr/solr-ref-guide/src/the-standard-query-parser.adoc    |  6 ------
 solr/solr-ref-guide/src/the-terms-component.adoc          |  1 -
 .../solr-ref-guide/src/transforming-result-documents.adoc | 10 ----------
 .../understanding-analyzers-tokenizers-and-filters.adoc   |  2 --
 solr/solr-ref-guide/src/update-request-processors.adoc    |  1 -
 solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc     |  5 -----
 .../uploading-data-with-solr-cell-using-apache-tika.adoc  |  1 -
 .../using-zookeeper-to-manage-configuration-files.adoc    |  1 -
 solr/solr-ref-guide/src/zookeeper-access-control.adoc     |  1 -
 22 files changed, 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/collections-api.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index 0e27baa..1937368 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -1651,7 +1651,6 @@ In this example, two replicas in the "alreadyLeaders" section already had the le
 
 Examining the clusterstate after issuing this call should show that every live node that has the "preferredLeader" property should also have the "leader" property set to _true_.
 
-// OLD_CONFLUENCE_ID: CollectionsAPI-FORCELEADER:ForceShardLeader
 
 [[CollectionsAPI-FORCELEADER_ForceShardLeader]]
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/cross-data-center-replication-cdcr-.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/cross-data-center-replication-cdcr-.adoc b/solr/solr-ref-guide/src/cross-data-center-replication-cdcr-.adoc
index a555287..4efaf4c 100644
--- a/solr/solr-ref-guide/src/cross-data-center-replication-cdcr-.adoc
+++ b/solr/solr-ref-guide/src/cross-data-center-replication-cdcr-.adoc
@@ -124,7 +124,6 @@ The CDCR replication logic requires modification to the maintenance logic of the
 
 If the communication with one of the target data center is slow, the Updates Log on the source data center can grow to a substantial size. In such a scenario, it is necessary for the Updates Log to be able to efficiently find a given update operation given its identifier. Given that its identifier is an incremental number, it is possible to implement an efficient search strategy. Each transaction log file contains as part of its filename the version number of the first element. This is used to quickly traverse all the transaction log files and find the transaction log file containing one specific version number.
 
-// OLD_CONFLUENCE_ID: CrossDataCenterReplication(CDCR)-Monitoring
 
 [[CrossDataCenterReplication_CDCR_-Monitoring]]
 === Monitoring
@@ -140,7 +139,6 @@ Information about the lifecycle and statistics will be provided on a per-shard b
 
 The CDC Replicator is a background thread that is responsible for replicating updates from a Source data center to one or more target data centers. It is responsible in providing monitoring information on a per-shard basis. As there can be a large number of collections and shards in a cluster, we will use a fixed-size pool of CDC Replicator threads that will be shared across shards.
 
-// OLD_CONFLUENCE_ID: CrossDataCenterReplication(CDCR)-Limitations
 
 [[CrossDataCenterReplication_CDCR_-Limitations]]
 === Limitations
@@ -151,14 +149,12 @@ The current design of CDCR has some limitations. CDCR will continue to evolve ov
 * CDCR is currently only active-passive; data is pushed from the Source cluster to the target cluster. There is active work being done in this area in the 6x code line to remove this limitation.
 * CDCR works most robustly with the same number of shards in the Source and target collection. The shards in the two collections may have different numbers of replicas.
 
-// OLD_CONFLUENCE_ID: CrossDataCenterReplication(CDCR)-Configuration
 
 [[CrossDataCenterReplication_CDCR_-Configuration]]
 == Configuration
 
 The source and target configurations differ in the case of the data centers being in separate clusters. "Cluster" here means separate ZooKeeper ensembles controlling disjoint Solr instances. Whether these data centers are physically separated or not is immaterial for this discussion.
 
-// OLD_CONFLUENCE_ID: CrossDataCenterReplication(CDCR)-SourceConfiguration
 
 [[CrossDataCenterReplication_CDCR_-SourceConfiguration]]
 === Source Configuration
@@ -195,7 +191,6 @@ Here is a sample of a source configuration file, a section in `solrconfig.xml`.
 </updateHandler>
 ----
 
-// OLD_CONFLUENCE_ID: CrossDataCenterReplication(CDCR)-TargetConfiguration
 
 [[CrossDataCenterReplication_CDCR_-TargetConfiguration]]
 === Target Configuration
@@ -503,7 +498,6 @@ http://host:8983/solr/<collection_name>/cdcr?action=START
 }
 ----
 
-// OLD_CONFLUENCE_ID: CrossDataCenterReplication(CDCR)-Monitoringcommands
 
 [[CrossDataCenterReplication_CDCR_-Monitoringcommands]]
 === Monitoring commands

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/faceting.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/faceting.adoc b/solr/solr-ref-guide/src/faceting.adoc
index b6a0ebf..26bfbe2 100644
--- a/solr/solr-ref-guide/src/faceting.adoc
+++ b/solr/solr-ref-guide/src/faceting.adoc
@@ -341,7 +341,6 @@ For more information, see the examples in the <<working-with-dates.adoc#working-
 
 ====
 
-// OLD_CONFLUENCE_ID: Faceting-Pivot(DecisionTree)Faceting
 
 [[Faceting-Pivot_DecisionTree_Faceting]]
 == Pivot (Decision Tree) Faceting

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/other-schema-elements.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/other-schema-elements.adoc b/solr/solr-ref-guide/src/other-schema-elements.adoc
index 4c8496f..42254eb 100644
--- a/solr/solr-ref-guide/src/other-schema-elements.adoc
+++ b/solr/solr-ref-guide/src/other-schema-elements.adoc
@@ -20,7 +20,6 @@ Schema defaults and `copyFields` cannot be used to populate the `uniqueKey` fiel
 
 Further, the operation will fail if the `uniqueKey` field is used, but is multivalued (or inherits the multivalue-ness from the `fieldtype`). However, `uniqueKey` will continue to work, as long as the field is properly used.
 
-// OLD_CONFLUENCE_ID: OtherSchemaElements-DefaultSearchField&QueryOperator
 
 [[OtherSchemaElements-DefaultSearchField_QueryOperator]]
 == Default Search Field & Query Operator

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/phonetic-matching.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/phonetic-matching.adoc b/solr/solr-ref-guide/src/phonetic-matching.adoc
index a3154dd..1f2f3c0 100644
--- a/solr/solr-ref-guide/src/phonetic-matching.adoc
+++ b/solr/solr-ref-guide/src/phonetic-matching.adoc
@@ -6,7 +6,6 @@ Phonetic matching algorithms may be used to encode tokens so that two different
 
 For overviews of and comparisons between algorithms, see http://en.wikipedia.org/wiki/Phonetic_algorithm and http://ntz-develop.blogspot.com/2011/03/phonetic-algorithms.html
 
-// OLD_CONFLUENCE_ID: PhoneticMatching-Beider-MorsePhoneticMatching(BMPM)
 
 [[PhoneticMatching-Beider-MorsePhoneticMatching_BMPM_]]
 == Beider-Morse Phonetic Matching (BMPM)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/rule-based-replica-placement.adoc b/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
index 59e8300..8ca438d 100644
--- a/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
+++ b/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
@@ -48,7 +48,6 @@ A condition can have one of the following operators to set the parameters for th
 * *less than (<)*: `tag:<x` means tag value less than ‘x’. x must be a number
 * *not equal (!)*: `tag:!x` means tag value MUST NOT be equal to ‘x’. The equals check is performed on String value
 
-// OLD_CONFLUENCE_ID: Rule-basedReplicaPlacement-FuzzyOperator(~)
 
 [[Rule-basedReplicaPlacement-FuzzyOperator_]]
 === Fuzzy Operator (~)
@@ -102,7 +101,6 @@ snitch=class:fqn.ClassName,key1:val1,key2:val2,key3:val3
 [[Rule-basedReplicaPlacement-Examples]]
 == Examples
 
-// OLD_CONFLUENCE_ID: Rule-basedReplicaPlacement-Keeplessthan2replicas(atmost1replica)ofthiscollectiononanynode
 
 [[Rule-basedReplicaPlacement-Keeplessthan2replicas_atmost1replica_ofthiscollectiononanynode]]
 === Keep less than 2 replicas (at most 1 replica) of this collection on any node
@@ -115,7 +113,6 @@ replica:<2,node:*
 // this is equivalent to replica:<2,node:*,shard:**. We can omit shard:** because ** is the default value of shard
 ----
 
-// OLD_CONFLUENCE_ID: Rule-basedReplicaPlacement-Foragivenshard,keeplessthan2replicasonanynode
 
 [[Rule-basedReplicaPlacement-Foragivenshard_keeplessthan2replicasonanynode]]
 === For a given shard, keep less than 2 replicas on any node

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc b/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
index e6097c5..de0ecde 100644
--- a/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
+++ b/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
@@ -53,7 +53,6 @@ bin/solr start -c -Dsolr.directoryFactory=HdfsDirectoryFactory
 
 This command starts Solr in SolrCloud mode, using the defined JVM properties.
 
-// OLD_CONFLUENCE_ID: RunningSolronHDFS-Modifyingsolr.in.sh(*nix)orsolr.in.cmd(Windows)
 
 [[RunningSolronHDFS-Modifyingsolr.in.sh_nix_orsolr.in.cmd_Windows_]]
 === Modifying solr.in.sh (*nix) or solr.in.cmd (Windows)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/solr-control-script-reference.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solr-control-script-reference.adoc b/solr/solr-ref-guide/src/solr-control-script-reference.adoc
index d7f4286..3d5a7f7 100644
--- a/solr/solr-ref-guide/src/solr-control-script-reference.adoc
+++ b/solr/solr-ref-guide/src/solr-control-script-reference.adoc
@@ -421,7 +421,6 @@ NOTE: Solr should have been started at least once before issuing these commands
 
 Use the `zk upconfig` command to upload one of the pre-configured configuration set or a customized configuration set to ZooKeeper.
 
-// OLD_CONFLUENCE_ID: SolrControlScriptReference-AvailableParameters(allparametersarerequired)
 
 [[SolrControlScriptReference-AvailableParameters_allparametersarerequired_]]
 ==== Available Parameters (all parameters are required)
@@ -467,7 +466,6 @@ This command does *not* automatically make changes effective! It simply uploads
 
 Use the `zk downconfig` command to download a configuration set from ZooKeeper to the local filesystem.
 
-// OLD_CONFLUENCE_ID: SolrControlScriptReference-AvailableParameters(allparametersarerequired).1
 
 [[SolrControlScriptReference-AvailableParameters_allparametersarerequired_.1]]
 ==== Available Parameters (all parameters are required)
@@ -563,7 +561,6 @@ An example of this command with the parameters is:
 
 `bin/solr zk rm /configs/myconfigset/schema.xml`
 
-// OLD_CONFLUENCE_ID: SolrControlScriptReference-MoveOneZooKeeperznodetoAnother(Rename)
 
 [[SolrControlScriptReference-MoveOneZooKeeperznodetoAnother_Rename_]]
 === Move One ZooKeeper znode to Another (Rename)
@@ -587,7 +584,6 @@ An example of this command is:
 
 `bin/solr zk mv /configs/oldconfigset /configs/newconfigset`
 
-// OLD_CONFLUENCE_ID: SolrControlScriptReference-ListaZooKeeperznode'sChildren
 
 [[SolrControlScriptReference-ListaZooKeeperznode_sChildren]]
 === List a ZooKeeper znode's Children
@@ -613,7 +609,6 @@ An example of this command with the parameters is:
 
 `bin/solr zk ls /collections`
 
-// OLD_CONFLUENCE_ID: SolrControlScriptReference-Createaznode(supportschroot)
 
 [[SolrControlScriptReference-Createaznode_supportschroot_]]
 === Create a znode (supports chroot)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/spatial-search.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/spatial-search.adoc b/solr/solr-ref-guide/src/spatial-search.adoc
index f4268a4..af1d0ba 100644
--- a/solr/solr-ref-guide/src/spatial-search.adoc
+++ b/solr/solr-ref-guide/src/spatial-search.adoc
@@ -120,7 +120,6 @@ Here's an example:
 
 LatLonType (deprecated) does *not* support rectangles that cross the dateline. For RPT and BBoxField, if you are non-geospatial coordinates (`geo="false"`) then you must quote the points due to the space, e.g. `"x y"`.
 
-// OLD_CONFLUENCE_ID: SpatialSearch-Optimizing:CacheorNot
 
 [[SpatialSearch-Optimizing_CacheorNot]]
 === Optimizing: Cache or Not
@@ -133,7 +132,6 @@ If you know the filter query (be it spatial or not) is fairly unique and not lik
 
 LLPSF does not support Solr's "PostFilter".
 
-// OLD_CONFLUENCE_ID: SpatialSearch-DistanceSortingorBoosting(FunctionQueries)
 
 [[SpatialSearch-DistanceSortingorBoosting_FunctionQueries_]]
 == Distance Sorting or Boosting (Function Queries)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/spell-checking.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/spell-checking.adoc b/solr/solr-ref-guide/src/spell-checking.adoc
index e6f8618..770ea44 100644
--- a/solr/solr-ref-guide/src/spell-checking.adoc
+++ b/solr/solr-ref-guide/src/spell-checking.adoc
@@ -276,7 +276,6 @@ This parameter specifies the maximum number of documents that should be collect
 
 The default value for this parameter is *0*, but when `spellcheck.collateExtendedResults` is *false*, the optimization is always used as if a *1* had been specified.
 
-// OLD_CONFLUENCE_ID: SpellChecking-Thespellcheck.collateParam.*ParameterPrefix
 
 [[SpellChecking-Thespellcheck.collateParam._ParameterPrefix]]
 === The `spellcheck.collateParam.*` Parameter Prefix
@@ -293,7 +292,6 @@ This parameter causes Solr to use the dictionary named in the parameter's argume
 
 Specifies an accuracy value to be used by the spell checking implementation to decide whether a result is worthwhile or not. The value is a float between 0 and 1. Defaults to `Float.MIN_VALUE`.
 
-// OLD_CONFLUENCE_ID: SpellChecking-Thespellcheck.<DICT_NAME>.keyParameter
 
 [[spellcheck_DICT_NAME]]
 === The `spellcheck.<DICT_NAME>.key` Parameter

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/streaming-expressions.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/streaming-expressions.adoc b/solr/solr-ref-guide/src/streaming-expressions.adoc
index 6dbf77f..665fbf9 100644
--- a/solr/solr-ref-guide/src/streaming-expressions.adoc
+++ b/solr/solr-ref-guide/src/streaming-expressions.adoc
@@ -551,7 +551,6 @@ topic(checkpointCollection,
 
 Stream decorators wrap other stream functions or perform operations on the stream.
 
-// OLD_CONFLUENCE_ID: StreamingExpressions-cartesianProduct(6.6)
 
 === cartesianProduct
 //TODO

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/the-dismax-query-parser.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/the-dismax-query-parser.adoc b/solr/solr-ref-guide/src/the-dismax-query-parser.adoc
index f10bd4d..585e222 100644
--- a/solr/solr-ref-guide/src/the-dismax-query-parser.adoc
+++ b/solr/solr-ref-guide/src/the-dismax-query-parser.adoc
@@ -58,7 +58,6 @@ The `q` parameter does not support wildcard characters such as *.
 
 If specified, the `q.alt` parameter defines a query (which by default will be parsed using standard query parsing syntax) when the main q parameter is not specified or is blank. The `q.alt` parameter comes in handy when you need something like a query to match all documents (don't forget `&rows=0` for that one!) in order to get collection-wide faceting counts.
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Theqf(QueryFields)Parameter
 
 [[TheDisMaxQueryParser-Theqf_QueryFields_Parameter]]
 === The `qf` (Query Fields) Parameter
@@ -69,7 +68,6 @@ The `qf` parameter introduces a list of fields, each of which is assigned a boos
 
 assigns `fieldOne` a boost of 2.3, leaves `fieldTwo` with the default boost (because no boost factor is specified), and `fieldThree` a boost of 0.4. These boost factors make matches in `fieldOne` much more significant than matches in `fieldTwo`, which in turn are much more significant than matches in `fieldThree`.
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Themm(MinimumShouldMatch)Parameter
 
 [[TheDisMaxQueryParser-Themm_MinimumShouldMatch_Parameter]]
 === The `mm` (Minimum Should Match) Parameter
@@ -100,7 +98,6 @@ When specifying `mm` values, keep in mind the following:
 
 The default value of `mm` is 100% (meaning that all clauses must match).
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Thepf(PhraseFields)Parameter
 
 [[TheDisMaxQueryParser-Thepf_PhraseFields_Parameter]]
 === The `pf` (Phrase Fields) Parameter
@@ -109,21 +106,18 @@ Once the list of matching documents has been identified using the `fq` and `qf`
 
 The format is the same as that used by the `qf` parameter: a list of fields and "boosts" to associate with each of them when making phrase queries out of the entire q parameter.
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Theps(PhraseSlop)Parameter
 
 [[TheDisMaxQueryParser-Theps_PhraseSlop_Parameter]]
 === The `ps` (Phrase Slop) Parameter
 
 The `ps` parameter specifies the amount of "phrase slop" to apply to queries specified with the pf parameter. Phrase slop is the number of positions one token needs to be moved in relation to another token in order to match a phrase specified in a query.
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Theqs(QueryPhraseSlop)Parameter
 
 [[TheDisMaxQueryParser-Theqs_QueryPhraseSlop_Parameter]]
 === The `qs` (Query Phrase Slop) Parameter
 
 The `qs` parameter specifies the amount of slop permitted on phrase queries explicitly included in the user's query string with the `qf` parameter. As explained above, slop refers to the number of positions one token needs to be moved in relation to another token in order to match a phrase specified in a query.
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Thetie(TieBreaker)Parameter
 
 [[TheDisMaxQueryParser-Thetie_TieBreaker_Parameter]]
 === The `tie` (Tie Breaker) Parameter
@@ -134,7 +128,6 @@ When a term from the user's input is tested against multiple fields, more than o
 
 A value of "0.0" - the default - makes the query a pure "disjunction max query": that is, only the maximum scoring subquery contributes to the final score. A value of "1.0" makes the query a pure "disjunction sum query" where it doesn't matter what the maximum scoring sub query is, because the final score will be the sum of the subquery scores. Typically a low value, such as 0.1, is useful.
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Thebq(BoostQuery)Parameter
 
 [[TheDisMaxQueryParser-Thebq_BoostQuery_Parameter]]
 === The `bq` (Boost Query) Parameter
@@ -149,7 +142,6 @@ bq=date:[NOW/DAY-1YEAR TO NOW/DAY]
 
 You can specify multiple `bq` parameters. If you want your query to be parsed as separate clauses with separate boosts, use multiple `bq` parameters.
 
-// OLD_CONFLUENCE_ID: TheDisMaxQueryParser-Thebf(BoostFunctions)Parameter
 
 [[TheDisMaxQueryParser-Thebf_BoostFunctions_Parameter]]
 === The `bf` (Boost Functions) Parameter

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc b/solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc
index b13287d..3a9481b 100644
--- a/solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc
+++ b/solr/solr-ref-guide/src/the-extended-dismax-query-parser.adoc
@@ -145,7 +145,6 @@ f.name.qf=last_name first_name
 
 Negative query boosts have been supported at the "Query" object level for a long time (resulting in negative scores for matching documents). Now the QueryParsers have been updated to handle this too.
 
-// OLD_CONFLUENCE_ID: TheExtendedDisMaxQueryParser-Using'slop'
 
 [[TheExtendedDisMaxQueryParser-Using_slop_]]
 == Using 'slop'
@@ -205,7 +204,6 @@ A document that contains "Hans Anderson" will match, but a document that contain
 
 Finally, in addition to the phrase fields (`pf`) parameter, `edismax` also supports the `pf2` and `pf3` parameters, for fields over which to create bigram and trigram phrase queries. The phrase slop for these parameters' queries can be specified using the `ps2` and `ps3` parameters, respectively. If you use `pf2`/`pf3` but not `ps2`/`ps3`, then the phrase slop for these parameters' queries will be taken from the `ps` parameter, if any.
 
-// OLD_CONFLUENCE_ID: TheExtendedDisMaxQueryParser-Usingthe'magicfields'_val_and_query_
 
 [[TheExtendedDisMaxQueryParser-Usingthe_magicfields__val_and_query_]]
 == Using the 'magic fields' `\_val_` and `\_query_`

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/the-standard-query-parser.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/the-standard-query-parser.adoc b/solr/solr-ref-guide/src/the-standard-query-parser.adoc
index 11ff935..3f67c4a 100644
--- a/solr/solr-ref-guide/src/the-standard-query-parser.adoc
+++ b/solr/solr-ref-guide/src/the-standard-query-parser.adoc
@@ -24,7 +24,6 @@ In addition to the <<common-query-parameters.adoc#common-query-parameters,Common
 
 Default parameter values are specified in `solrconfig.xml`, or overridden by query-time values in the request.
 
-// OLD_CONFLUENCE_ID: TheStandardQueryParser-TheStandardQueryParser'sResponse
 
 [[TheStandardQueryParser-TheStandardQueryParser_sResponse]]
 == The Standard Query Parser's Response
@@ -170,7 +169,6 @@ The brackets around a query determine its inclusiveness.
 * Curly brackets `{` & `}` denote an exclusive range query that matches values between the upper and lower bounds, but excluding the upper and lower bounds themselves.
 * You can mix these types so one end of the range is inclusive and the other is exclusive. Here's an example: `count:{1 TO 10]`
 
-// OLD_CONFLUENCE_ID: TheStandardQueryParser-BoostingaTermwith^
 
 [[TheStandardQueryParser-BoostingaTermwith_]]
 === Boosting a Term with `^`
@@ -189,7 +187,6 @@ This will make documents with the term jakarta appear more relevant. You can als
 
 By default, the boost factor is 1. Although the boost factor must be positive, it can be less than 1 (for example, it could be 0.2).
 
-// OLD_CONFLUENCE_ID: TheStandardQueryParser-ConstantScorewith^=
 
 [[TheStandardQueryParser-ConstantScorewith_]]
 === Constant Score with `^=`
@@ -264,7 +261,6 @@ or
 
 `"jakarta apache" OR jakarta`
 
-// OLD_CONFLUENCE_ID: TheStandardQueryParser-TheBooleanOperator+
 
 [[TheStandardQueryParser-TheBooleanOperator_]]
 === The Boolean Operator `+`
@@ -282,7 +278,6 @@ This operator is supported by both the standard query parser and the DisMax quer
 
 ====
 
-// OLD_CONFLUENCE_ID: TheStandardQueryParser-TheBooleanOperatorAND(&&)
 
 [[TheStandardQueryParser-TheBooleanOperatorAND_]]
 === The Boolean Operator AND (`&&`)
@@ -295,7 +290,6 @@ To search for documents that contain "jakarta apache" and "Apache Lucene," use e
 
 `"jakarta apache" && "Apache Lucene"`
 
-// OLD_CONFLUENCE_ID: TheStandardQueryParser-TheBooleanOperatorNOT(!)
 
 [[TheStandardQueryParser-TheBooleanOperatorNOT_]]
 === The Boolean Operator NOT (`!`)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/the-terms-component.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/the-terms-component.adoc b/solr/solr-ref-guide/src/the-terms-component.adoc
index 3a8b79e..1625be2 100644
--- a/solr/solr-ref-guide/src/the-terms-component.adoc
+++ b/solr/solr-ref-guide/src/the-terms-component.adoc
@@ -182,7 +182,6 @@ Results:
 </response>
 ----
 
-// OLD_CONFLUENCE_ID: TheTermsComponent-GetFirst10TermsStartingwithLetter'a'
 
 [[TheTermsComponent-GetFirst10TermsStartingwithLetter_a_]]
 === Get First 10 Terms Starting with Letter 'a'

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/transforming-result-documents.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/transforming-result-documents.adoc b/solr/solr-ref-guide/src/transforming-result-documents.adoc
index 47a7177..33bb88e 100644
--- a/solr/solr-ref-guide/src/transforming-result-documents.adoc
+++ b/solr/solr-ref-guide/src/transforming-result-documents.adoc
@@ -33,7 +33,6 @@ The sections below discuss exactly what these various transformers do.
 [[TransformingResultDocuments-AvailableTransformers]]
 == Available Transformers
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[value]-ValueAugmenterFactory
 
 [[TransformingResultDocuments-_value_-ValueAugmenterFactory]]
 === `[value]` - ValueAugmenterFactory
@@ -78,7 +77,6 @@ In addition to using these request parameters, you can configure additional name
 
 The "```value```" option forces an explicit value to always be used, while the "```defaultValue```" option provides a default that can still be overridden using the "```v```" and "```t```" local parameters.
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[explain]-ExplainAugmenterFactory
 
 [[TransformingResultDocuments-_explain_-ExplainAugmenterFactory]]
 === `[explain]` - ExplainAugmenterFactory
@@ -114,7 +112,6 @@ A default style can be configured by specifying an "args" parameter in your conf
 </transformer>
 ----
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[child]-ChildDocTransformerFactory
 
 [[TransformingResultDocuments-_child_-ChildDocTransformerFactory]]
 === `[child]` - ChildDocTransformerFactory
@@ -133,7 +130,6 @@ When using this transformer, the `parentFilter` parameter must be specified, and
 * `childFilter` - query to filter which child documents should be included, this can be particularly useful when you have multiple levels of hierarchical documents (default: all children)
 * `limit` - the maximum number of child documents to be returned per parent document (default: 10)
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[shard]-ShardAugmenterFactory
 
 [[TransformingResultDocuments-_shard_-ShardAugmenterFactory]]
 === `[shard]` - ShardAugmenterFactory
@@ -142,7 +138,6 @@ This transformer adds information about what shard each individual document came
 
 ShardAugmenterFactory does not support any request parameters, or configuration options.
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[docid]-DocIdAugmenterFactory
 
 [[TransformingResultDocuments-_docid_-DocIdAugmenterFactory]]
 === `[docid]` - DocIdAugmenterFactory
@@ -151,7 +146,6 @@ This transformer adds the internal Lucene document id to each document – this
 
 DocIdAugmenterFactory does not support any request parameters, or configuration options.
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[elevated]and[excluded]
 
 [[TransformingResultDocuments-_elevated_and_excluded_]]
 === `[elevated]` and `[excluded]`
@@ -184,7 +178,6 @@ fl=id,[elevated],[excluded]&excludeIds=GB18030TEST&elevateIds=6H500F0&markExclud
 ...
 ----
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[json]/[xml]
 
 [[TransformingResultDocuments-_json_xml_]]
 === `[json]` / `[xml]`
@@ -196,7 +189,6 @@ These transformers replace field value containing a string representation of a v
 fl=id,source_s:[json]&wt=json
 ----
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[subquery]
 
 [[TransformingResultDocuments-_subquery_]]
 === `[subquery]`
@@ -301,7 +293,6 @@ If subquery collection has a different unique key field name (let's say `foo_id`
 
 ====
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[geo]-Geospatialformatter
 
 [[TransformingResultDocuments-_geo_-Geospatialformatter]]
 === `[geo]` - Geospatial formatter
@@ -312,7 +303,6 @@ Normally you'll simply be consistent in choosing the format type you want by set
 
 In addition, this feature is very useful with the `RptWithGeometrySpatialField` to avoid double-storage of the potentially large vector geometry. This transformer will detect that field type and fetch the geometry from an internal compact binary representation on disk (in docValues), and then format it as desired. As such, you needn't mark the field as stored, which would be redundant. In a sense this double-storage between docValues and stored-value storage isn't unique to spatial but with polygonal geometry it can be a lot of data, and furthermore you'd like to avoid storing it in a verbose format (like GeoJSON or WKT).
 
-// OLD_CONFLUENCE_ID: TransformingResultDocuments-[features]-LTRFeatureLoggerTransformerFactory
 
 [[TransformingResultDocuments-_features_-LTRFeatureLoggerTransformerFactory]]
 === `[features]` - LTRFeatureLoggerTransformerFactory

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/understanding-analyzers-tokenizers-and-filters.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/understanding-analyzers-tokenizers-and-filters.adoc b/solr/solr-ref-guide/src/understanding-analyzers-tokenizers-and-filters.adoc
index b84afa0..e3e8aea 100644
--- a/solr/solr-ref-guide/src/understanding-analyzers-tokenizers-and-filters.adoc
+++ b/solr/solr-ref-guide/src/understanding-analyzers-tokenizers-and-filters.adoc
@@ -9,7 +9,6 @@ The following sections describe how Solr breaks down and works with textual data
 * <<about-tokenizers.adoc#about-tokenizers,Tokenizers>> break field data into lexical units, or __tokens__.
 * <<about-filters.adoc#about-filters,Filters>> examine a stream of tokens and keep them, transform or discard them, or create new ones. Tokenizers and filters may be combined to form pipelines, or __chains__, where the output of one is input to the next. Such a sequence of tokenizers and filters is called an _analyzer_ and the resulting output of an analyzer is used to match query results or build indices.
 
-// OLD_CONFLUENCE_ID: UnderstandingAnalyzers,Tokenizers,andFilters-UsingAnalyzers,Tokenizers,andFilters
 
 [[UnderstandingAnalyzers_Tokenizers_andFilters-UsingAnalyzers_Tokenizers_andFilters]]
 == Using Analyzers, Tokenizers, and Filters
@@ -18,7 +17,6 @@ Although the analysis process is used for both indexing and querying, the same a
 
 The tokens output by the analysis process define the values, or __terms__, of that field and are used either to build an index of those terms when a new document is added, or to identify which documents contain the terms you are querying for.
 
-// OLD_CONFLUENCE_ID: UnderstandingAnalyzers,Tokenizers,andFilters-ForMoreInformation
 
 [[UnderstandingAnalyzers_Tokenizers_andFilters-ForMoreInformation]]
 == For More Information

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/update-request-processors.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/update-request-processors.adoc b/solr/solr-ref-guide/src/update-request-processors.adoc
index 3a0cc49..e69adbb 100644
--- a/solr/solr-ref-guide/src/update-request-processors.adoc
+++ b/solr/solr-ref-guide/src/update-request-processors.adoc
@@ -178,7 +178,6 @@ curl "http://localhost:8983/solr/gettingstarted/update/json?update.chain=dedupe&
 
 The above should dedupe the two identical documents and index only one of them.
 
-// OLD_CONFLUENCE_ID: UpdateRequestProcessors-Processor&Post-ProcessorRequestParameters
 
 [[UpdateRequestProcessors-Processor_Post-ProcessorRequestParameters]]
 === Processor & Post-Processor Request Parameters

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc b/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
index b520633..c51b55d 100644
--- a/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
+++ b/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
@@ -38,14 +38,12 @@ You should now be ready to upgrade your cluster. Please verify this process in a
 
 The approach we recommend is to perform the upgrade of each Solr node, one-by-one. In other words, you will need to stop a node, upgrade it to the new version of Solr, and restart it before moving on to the next node. This means that for a short period of time, there will be a mix of "Old Solr" and "New Solr" nodes running in your cluster. We also assume that you will point the new Solr node to your existing Solr home directory where the Lucene index files are managed for each collection on the node. This means that you won't need to move any index files around to perform the upgrade.
 
-// OLD_CONFLUENCE_ID: UpgradingaSolrCluster-Step1:StopSolr
 
 [[UpgradingaSolrCluster-Step1_StopSolr]]
 === Step 1: Stop Solr
 
 Begin by stopping the Solr node you want to upgrade. After stopping the node, if using replication (i.e., collections with replicationFactor > 1), verify that all leaders hosted on the downed node have successfully migrated to other replicas; you can do this by visiting the <<cloud-screens.adoc#cloud-screens,Cloud panel in the Solr Admin UI>>. If not using replication, then any collections with shards hosted on the downed node will be temporarily off-line.
 
-// OLD_CONFLUENCE_ID: UpgradingaSolrCluster-Step2:InstallSolrasaService
 
 [[UpgradingaSolrCluster-Step2_InstallSolrasaService]]
 === Step 2: Install Solr as a Service
@@ -59,7 +57,6 @@ If you have a `/var/solr/solr.in.sh` file for your existing Solr install, runnin
 
 ====
 
-// OLD_CONFLUENCE_ID: UpgradingaSolrCluster-Step3:SetEnvironmentVariableOverrides
 
 [[UpgradingaSolrCluster-Step3_SetEnvironmentVariableOverrides]]
 === Step 3: Set Environment Variable Overrides
@@ -70,14 +67,12 @@ Open `/etc/default/solr.in.sh` with a text editor and verify that the following
 
 Make sure the user you plan to own the Solr process is the owner of the `SOLR_HOME` directory. For instance, if you plan to run Solr as the "solr" user and `SOLR_HOME` is `/var/solr/data`, then you would do: `sudo chown -R solr: /var/solr/data`
 
-// OLD_CONFLUENCE_ID: UpgradingaSolrCluster-Step4:StartSolr
 
 [[UpgradingaSolrCluster-Step4_StartSolr]]
 === Step 4: Start Solr
 
 You are now ready to start the upgraded Solr node by doing: `sudo service solr start`. The upgraded instance will join the existing cluster because you're using the same `SOLR_HOME`, `SOLR_PORT`, and `SOLR_HOST` settings used by the old Solr node; thus, the new server will look like the old node to the running cluster. Be sure to look in `/var/solr/logs/solr.log` for errors during startup.
 
-// OLD_CONFLUENCE_ID: UpgradingaSolrCluster-Step5:RunHealthcheck
 
 [[UpgradingaSolrCluster-Step5_RunHealthcheck]]
 === Step 5: Run Healthcheck

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/uploading-data-with-solr-cell-using-apache-tika.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/uploading-data-with-solr-cell-using-apache-tika.adoc b/solr/solr-ref-guide/src/uploading-data-with-solr-cell-using-apache-tika.adoc
index fc678d6..0247e98 100644
--- a/solr/solr-ref-guide/src/uploading-data-with-solr-cell-using-apache-tika.adoc
+++ b/solr/solr-ref-guide/src/uploading-data-with-solr-cell-using-apache-tika.adoc
@@ -256,7 +256,6 @@ The command below captures `<div>` tags separately, and then maps all the instan
 bin/post -c techproducts example/exampledocs/sample.html -params "literal.id=doc2&captureAttr=true&defaultField=_text_&fmap.div=foo_t&capture=div"
 ----
 
-// OLD_CONFLUENCE_ID: UploadingDatawithSolrCellusingApacheTika-Capture&Mapping
 
 [[UploadingDatawithSolrCellusingApacheTika-Capture_Mapping]]
 ==== Capture & Mapping

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc b/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
index 77bc78f..53cdbc3 100644
--- a/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
+++ b/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
@@ -40,7 +40,6 @@ It's a good idea to keep these files under version control.
 
 ====
 
-// OLD_CONFLUENCE_ID: UsingZooKeepertoManageConfigurationFiles-UploadingConfigurationFilesusingbin/solrorSolrJ
 
 [[UsingZooKeepertoManageConfigurationFiles-UploadingConfigurationFilesusingbin_solrorSolrJ]]
 == Uploading Configuration Files using `bin/solr` or SolrJ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a0b9b53/solr/solr-ref-guide/src/zookeeper-access-control.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/zookeeper-access-control.adoc b/solr/solr-ref-guide/src/zookeeper-access-control.adoc
index c5befa6..ef17062 100644
--- a/solr/solr-ref-guide/src/zookeeper-access-control.adoc
+++ b/solr/solr-ref-guide/src/zookeeper-access-control.adoc
@@ -81,7 +81,6 @@ Notice the overlap in system property names with credentials provider `VMParamsS
 
 You can give the readonly credentials to "clients" of your SolrCloud cluster - e.g. to be used by SolrJ clients. They will be able to read whatever is necessary to run a functioning SolrJ client, but they will not be able to modify any content in ZooKeeper.
 
-// OLD_CONFLUENCE_ID: ZooKeeperAccessControl-bin/solr&solr.cmd,server/scripts/cloud-scripts/zkcli.sh&zkcli.bat
 
 [[ZooKeeperAccessControl-bin_solr_solr.cmd_server_scripts_cloud-scripts_zkcli.sh_zkcli.bat]]
 === `bin/solr` & `solr.cmd`, `server/scripts/cloud-scripts/zkcli.sh` & `zkcli.bat`


[3/4] lucene-solr:jira/solr-10290: fix broken links/anchors found with improved link checker

Posted by ho...@apache.org.
fix broken links/anchors found with improved link checker


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/10152ba8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/10152ba8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/10152ba8

Branch: refs/heads/jira/solr-10290
Commit: 10152ba8cf96c49f3f2e24acf5fbc99dcb508500
Parents: 53f3801
Author: Chris Hostetter <ho...@apache.org>
Authored: Mon May 8 21:32:03 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Mon May 8 21:32:03 2017 -0700

----------------------------------------------------------------------
 solr/solr-ref-guide/src/collections-api.adoc    | 54 ++++++++++----------
 solr/solr-ref-guide/src/coreadmin-api.adoc      |  2 +-
 solr/solr-ref-guide/src/defining-fields.adoc    |  4 +-
 solr/solr-ref-guide/src/faceting.adoc           |  2 +-
 .../field-type-definitions-and-properties.adoc  |  1 -
 solr/solr-ref-guide/src/graph-traversal.adoc    |  2 +-
 .../src/implicit-requesthandlers.adoc           | 12 ++---
 solr/solr-ref-guide/src/index-replication.adoc  |  2 +-
 .../src/indexconfig-in-solrconfig.adoc          |  1 +
 solr/solr-ref-guide/src/language-analysis.adoc  |  2 +-
 .../src/parallel-sql-interface.adoc             |  1 +
 .../src/rule-based-authorization-plugin.adoc    |  2 +-
 .../src/running-solr-on-hdfs.adoc               |  4 +-
 solr/solr-ref-guide/src/solr-glossary.adoc      |  2 +-
 solr/solr-ref-guide/src/spell-checking.adoc     |  4 +-
 .../src/streaming-expressions.adoc              |  2 +-
 solr/solr-ref-guide/src/upgrading-solr.adoc     |  2 +-
 17 files changed, 49 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/collections-api.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index a5457b8..0e27baa 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -25,7 +25,7 @@ The Collections API is used to enable you to create, remove, or reload collectio
 |shards |string |No |empty |A comma separated list of shard names, e.g., shard-x,shard-y,shard-z. This is a required parameter when using the 'implicit' router.
 |replicationFactor |integer |No |1 |The number of replicas to be created for each shard.
 |maxShardsPerNode |integer |No |1 |When creating collections, the shards and/or replicas are spread across all available (i.e., live) nodes, and two replicas of the same shard will never be on the same node. If a node is not live when the CREATE operation is called, it will not get any parts of the new collection, which could lead to too many replicas being created on a single live node. Defining `maxShardsPerNode` sets a limit on the number of replicas CREATE will spread to each node. If the entire collection cannot fit on the live nodes, no collection will be created at all.
-|createNodeSet |string |No | |Allows defining the nodes to spread the new collection across. If not provided, the CREATE operation will create shard-replica spread across all live Solr nodes. The format is a comma-separated list of node_names, such as `localhost:8983_solr,` `localhost:8984_solr,` `localhost:8985_solr`. Alternatively, use the special value of `EMPTY` to initially create no shard-replica within the new collection and then later use the <<CollectionsAPI-api_addreplica,ADDREPLICA>> operation to add shard-replica when and where required.
+|createNodeSet |string |No | |Allows defining the nodes to spread the new collection across. If not provided, the CREATE operation will create shard-replica spread across all live Solr nodes. The format is a comma-separated list of node_names, such as `localhost:8983_solr,` `localhost:8984_solr,` `localhost:8985_solr`. Alternatively, use the special value of `EMPTY` to initially create no shard-replica within the new collection and then later use the <<CollectionsAPI-addreplica,ADDREPLICA>> operation to add shard-replica when and where required.
 |createNodeSet.shuffle |boolean |No |true a|
 Controls whether or not the shard-replicas created for this collection will be assigned to the nodes specified by the createNodeSet in a sequential manner, or if the list of nodes should be shuffled prior to creating individual replicas. A 'false' value makes the results of a collection creation predictable and gives more exact control over the location of the individual shard-replicas, but 'true' can be a better choice for ensuring replicas are distributed evenly across nodes.
 
@@ -34,8 +34,8 @@ Ignored if createNodeSet is not also specified.
 |collection.configName |string |No |empty |Defines the name of the configurations (which must already be stored in ZooKeeper) to use for this collection. If not provided, Solr will default to the collection name as the configuration name.
 |router.field |string |No |empty |If this field is specified, the router will look at the value of the field in an input document to compute the hash and identify a shard instead of looking at the `uniqueKey` field. If the field specified is null in the document, the document will be rejected. Please note that <<realtime-get.adoc#realtime-get,RealTime Get>> or retrieval by id would also require the parameter `\_route_` (or `shard.keys`) to avoid a distributed search.
 |property._name_=_value_ |string |No | |Set core property _name_ to _value_. See the section <<defining-core-properties.adoc#defining-core-properties,Defining core.properties>> for details on supported properties and values.
-|autoAddReplicas |boolean |No |false |When set to true, enables auto addition of replicas on shared file systems. See the section <<running-solr-on-hdfs.adoc#RunningSolronHDFS-autoAddReplicasSettings,autoAddReplicas Settings>> for more details on settings and overrides.
-|async |string |No | |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|autoAddReplicas |boolean |No |false |When set to true, enables auto addition of replicas on shared file systems. See the section <<running-solr-on-hdfs.adoc#RunningSolronHDFS-AutomaticallyAddReplicasinSolrCloud,autoAddReplicas Settings>> for more details on settings and overrides.
+|async |string |No | |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |rule |string |No | |Replica placement rules. See the section <<rule-based-replica-placement.adoc#rule-based-replica-placement,Rule-based Replica Placement>> for details.
 |snitch |string |No | |Details of the snitch provider. See the section <<rule-based-replica-placement.adoc#rule-based-replica-placement,Rule-based Replica Placement>> for details.
 |===
@@ -110,7 +110,7 @@ The attributes that can be modified are:
 * rule
 * snitch
 
-See the <<CollectionsAPI-api1,CREATE>> section above for details on these attributes.
+See the <<CollectionsAPI-create,CREATE>> section above for details on these attributes.
 
 |===
 
@@ -130,7 +130,7 @@ The RELOAD action is used when you have changed a configuration in ZooKeeper.
 |===
 |Key |Type |Required |Description
 |name |string |Yes |The name of the collection to reload.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Output.1]]
@@ -189,7 +189,7 @@ One can also specify an optional `ranges` parameter to divide the original shard
 
 Another optional parameter `split.key` can be used to split a shard using a route key such that all documents of the specified route key end up in a single dedicated sub-shard. Providing the 'shard' parameter is not required in this case because the route key is enough to figure out the right shard. A route key which spans more than one shard is not supported. For example, suppose `split.key=A!` hashes to the range 12-15 and belongs to shard 'shard1' with range 0-20 then splitting by this route key would yield three sub-shards with ranges 0-11, 12-15 and 16-20. Note that the sub-shard with the hash range of the route key may also contain documents for other route keys whose hash ranges overlap.
 
-Shard splitting can be a long running process. In order to avoid timeouts, you should run this as an <<CollectionsAPI-AsynchronousCalls,asynchronous call>>.
+Shard splitting can be a long running process. In order to avoid timeouts, you should run this as an <<CollectionsAPI-async,asynchronous call>>.
 
 [[CollectionsAPI-Input.2]]
 === Input
@@ -204,7 +204,7 @@ Shard splitting can be a long running process. In order to avoid timeouts, you s
 |ranges |string |No |A comma-separated list of hash ranges in hexadecimal, such as `ranges=0-1f4,1f5-3e8,3e9-5dc`.
 |split.key |string |No |The key to use for splitting the index.
 |property._name_=_value_ |string |No |Set core property _name_ to _value_. See the section <<defining-core-properties.adoc#defining-core-properties,Defining core.properties>> for details on supported properties and values.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>
 |===
 
 [[CollectionsAPI-Output.2]]
@@ -305,7 +305,7 @@ Shards can only created with this API for collections that use the 'implicit' ro
 |shard |string |Yes |The name of the shard to be created.
 |createNodeSet |string |No |Allows defining the nodes to spread the new collection across. If not provided, the CREATE operation will create shard-replica spread across all live Solr nodes. The format is a comma-separated list of node_names, such as `localhost:8983_solr,` `localhost:8984_solr,` `localhost:8985_solr`.
 |property._name_=_value_ |string |No |Set core property _name_ to _value_. See the section <<defining-core-properties.adoc#defining-core-properties,Defining core.properties>> for details on supported properties and values.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Output.3]]
@@ -357,7 +357,7 @@ Deleting a shard will unload all replicas of the shard, remove them from `cluste
 |deleteInstanceDir |boolean |No |By default Solr will delete the entire instanceDir of each replica that is deleted. Set this to `false` to prevent the instance directory from being deleted.
 |deleteDataDir |boolean |No |By default Solr will delete the dataDir of each replica that is deleted. Set this to `false` to prevent the data directory from being deleted.
 |deleteIndex |boolean |No |By default Solr will delete the index of each replica that is deleted. Set this to `false` to prevent the index directory from being deleted.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Output.4]]
@@ -414,7 +414,7 @@ The `CREATEALIAS` action will create a new alias pointing to one or more collect
 |Key |Type |Required |Description
 |name |string |Yes |The alias name to be created.
 |collections |string |Yes |The list of collections to be aliased, separated by commas. They must already exist in the cluster.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Output.5]]
@@ -460,7 +460,7 @@ http://localhost:8983/solr/admin/collections?action=CREATEALIAS&name=testalias&c
 |===
 |Key |Type |Required |Description
 |name |string |Yes |The name of the alias to delete.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Output.6]]
@@ -506,7 +506,7 @@ http://localhost:8983/solr/admin/collections?action=DELETEALIAS&name=testalias
 |===
 |Key |Type |Required |Description
 |name |string |Yes |The name of the collection to delete.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Output.7]]
@@ -575,7 +575,7 @@ Delete a named replica from the specified collection and shard. If the correspon
 |deleteDataDir |boolean |No |By default Solr will delete the dataDir of the replica that is deleted. Set this to `false` to prevent the data directory from being deleted.
 |deleteIndex |boolean |No |By default Solr will delete the index of the replica that is deleted. Set this to `false` to prevent the index directory from being deleted.
 |onlyIfDown |boolean |No |When set to 'true', no action will be taken if the replica is active. Default is 'false'.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Examples.8]]
@@ -632,7 +632,7 @@ Ignored if the shard param is also specified.
 |instanceDir |string |No |The instanceDir for the core that will be created
 |dataDir |string |No |The directory in which the core should be created
 |property._name_=_value_ |string |No |Set core property _name_ to _value_. See <<defining-core-properties.adoc#defining-core-properties,Defining core.properties>>.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>
 |===
 
 [[CollectionsAPI-Examples.9]]
@@ -740,7 +740,7 @@ Please note that the migrate API does not perform any de-duplication on the docu
 |split.key |string |Yes |The routing key prefix. For example, if uniqueKey is a!123, then you would use `split.key=a!`.
 |forward.timeout |int |No |The timeout, in seconds, until which write requests made to the source collection for the given `split.key` will be forwarded to the target shard. The default is 60 seconds.
 |property._name_=_value_ |string |No |Set core property _name_ to _value_. See the section <<defining-core-properties.adoc#defining-core-properties,Defining core.properties>> for details on supported properties and values.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-Output.9]]
@@ -1178,7 +1178,7 @@ http://localhost:8983/solr/admin/collections?action=clusterstatus&wt=json
 [[CollectionsAPI-requeststatus]]
 == REQUESTSTATUS: Request Status of an Async Call
 
-Request the status and response of an already submitted <<CollectionsAPI-AsynchronousCalls,Asynchronous Collection API>> (below) call. This call is also used to clear up the stored statuses.
+Request the status and response of an already submitted <<CollectionsAPI-async,Asynchronous Collection API>> (below) call. This call is also used to clear up the stored statuses.
 
 `/admin/collections?action=REQUESTSTATUS&requestid=_request-id_`
 
@@ -1245,7 +1245,7 @@ http://localhost:8983/solr/admin/collections?action=REQUESTSTATUS&requestid=1004
 [[CollectionsAPI-deletestatus]]
 == DELETESTATUS: Delete Status
 
-Delete the stored response of an already failed or completed <<CollectionsAPI-AsynchronousCalls,Asynchronous Collection API>> call.
+Delete the stored response of an already failed or completed <<CollectionsAPI-async,Asynchronous Collection API>> call.
 
 `/admin/collections?action=DELETESTATUS&requestid=_request-id_`
 
@@ -1690,7 +1690,7 @@ A expert level utility API to move a collection from shared `clusterstate.json`
 |===
 |Key |Type |Required |Description
 |collection |string |Yes |The name of the collection to be migrated from `clusterstate.json` to its own `state.json` zookeeper node
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 This API is useful in migrating any collections created prior to Solr 5.0 to the more scalable cluster state format now used by default. If a collection was created in any Solr 5.x version or higher, then executing this command is not necessary.
@@ -1713,8 +1713,8 @@ The backup command will backup Solr indexes and configurations for a specified c
 |===
 |Key |Type |Required |Description
 |collection |string |Yes |The name of the collection that needs to be backed up
-|location |string |No |The location on the shared drive for the backup command to write to. Alternately it can be set as a <<CollectionsAPI-api11,cluster property>>
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>
+|location |string |No |The location on the shared drive for the backup command to write to. Alternately it can be set as a <<CollectionsAPI-clusterprop,cluster property>>
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>
 |repository |string |No |The name of the repository to be used for the backup. If no repository is specified then the local filesystem repository will be used automatically.
 |===
 
@@ -1729,7 +1729,7 @@ The restore operation will create a collection with the specified name in the co
 
 The collection created will be of the same number of shards and replicas as the original collection, preserving routing information, etc. Optionally, you can override some parameters documented below. While restoring, if a configSet with the same name exists in ZooKeeper then Solr will reuse that, or else it will upload the backed up configSet in ZooKeeper and use that.
 
-You can use the collection <<CollectionsAPI-api4,alias>> API to make sure client's don't need to change the endpoint to query or index against the newly restored collection.
+You can use the collection <<CollectionsAPI-createalias,alias>> API to make sure clients don't need to change the endpoint to query or index against the newly restored collection.
 
 [[CollectionsAPI-Input.23]]
 === Input
@@ -1740,8 +1740,8 @@ You can use the collection <<CollectionsAPI-api4,alias>> API to make sure client
 |===
 |Key |Type |Required |Description
 |collection |string |Yes |The collection where the indexes will be restored into.
-|location |string |No |The location on the shared drive for the restore command to read from. Alternately it can be set as a <<CollectionsAPI-api11,cluster property>>.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|location |string |No |The location on the shared drive for the restore command to read from. Alternately it can be set as a <<CollectionsAPI-clusterprop,cluster property>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |repository |string |No |The name of the repository to be used for the backup. If no repository is specified then the local filesystem repository will be used automatically.
 |===
 
@@ -1775,7 +1775,7 @@ Deletes all replicas of all collections in that node. Please note that the node
 |===
 |Key |Type |Required |Description
 |node |string |Yes |The node to be cleaned up
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [[CollectionsAPI-replacenode]]
@@ -1796,7 +1796,7 @@ This command recreates replicas in the source node to the target node. After eac
 |source |string |Yes |The source node from which the replicas need to be copied from
 |target |string |Yes |The target node
 |parallel |boolean |No |default=false. If this flag is set to true, all replicas are created in separate threads. Keep in mind that this can lead to very high network and disk I/O if the replicas have very large indices.
-|async |string |No |Request ID to track this action which will be <<CollectionsAPI-AsynchronousCalls,processed asynchronously>>.
+|async |string |No |Request ID to track this action which will be <<CollectionsAPI-async,processed asynchronously>>.
 |===
 
 [IMPORTANT]
@@ -1804,12 +1804,10 @@ This command recreates replicas in the source node to the target node. After eac
 This operation does not hold necessary locks on the replicas that belong to on the source node. So don't perform other collection operations in this period.
 ====
 
-[[CollectionsAPI-AsynchronousCalls]]
-
 [[CollectionsAPI-async]]
 == Asynchronous Calls
 
-Since some collection API calls can be long running tasks e.g. Shard Split, you can optionally have the calls run asynchronously. Specifying `async=<request-id>` enables you to make an asynchronous call, the status of which can be requested using the <<CollectionsAPI-RequestStatus,REQUESTSTATUS>> call at any time.
+Since some collection API calls can be long running tasks e.g. Shard Split, you can optionally have the calls run asynchronously. Specifying `async=<request-id>` enables you to make an asynchronous call, the status of which can be requested using the <<CollectionsAPI-requeststatus,REQUESTSTATUS>> call at any time.
 
 As of now, REQUESTSTATUS does not automatically clean up the tracking data structures, meaning the status of completed or failed tasks stays stored in ZooKeeper unless cleared manually. DELETESTATUS can be used to clear the stored statuses. However, there is a limit of 10,000 on the number of async call responses stored in a cluster.
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/coreadmin-api.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/coreadmin-api.adoc b/solr/solr-ref-guide/src/coreadmin-api.adoc
index dc74fc3..0d59396 100644
--- a/solr/solr-ref-guide/src/coreadmin-api.adoc
+++ b/solr/solr-ref-guide/src/coreadmin-api.adoc
@@ -283,7 +283,7 @@ This example uses the `ranges` parameter with hash ranges 0-500, 501-1000 and 10
 
 The `targetCore` must already exist and must have a compatible schema with the `core` index. A commit is automatically called on the `core` index before it is split.
 
-This command is used as part of the <<collections-api.adoc#CollectionsAPI-SplitaShard,SPLITSHARD>> command but it can be used for non-cloud Solr cores as well. When used against a non-cloud core without `split.key` parameter, this action will split the source index and distribute its documents alternately so that each split piece contains an equal number of documents. If the `split.key` parameter is specified then only documents having the same route key will be split from the source index.
+This command is used as part of the <<collections-api.adoc#CollectionsAPI-splitshard,SPLITSHARD>> command but it can be used for non-cloud Solr cores as well. When used against a non-cloud core without `split.key` parameter, this action will split the source index and distribute its documents alternately so that each split piece contains an equal number of documents. If the `split.key` parameter is specified then only documents having the same route key will be split from the source index.
 
 [[CoreAdminAPI-REQUESTSTATUS]]
 == REQUESTSTATUS

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/defining-fields.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/defining-fields.adoc b/solr/solr-ref-guide/src/defining-fields.adoc
index 7641d96..ed5e665 100644
--- a/solr/solr-ref-guide/src/defining-fields.adoc
+++ b/solr/solr-ref-guide/src/defining-fields.adoc
@@ -39,7 +39,7 @@ Fields can have many of the same properties as field types. Properties from the
 |Property |Description |Values |Implicit Default
 |indexed |If true, the value of the field can be used in queries to retrieve matching documents. |true or false |true
 |stored |If true, the actual value of the field can be retrieved by queries. |true or false |true
-|docValues |If true, the value of the field will be put in a column-oriented <<docvalues.adoc#DocValues,DocValues>> structure. |true or false |false
+|docValues |If true, the value of the field will be put in a column-oriented <<docvalues.adoc#docvalues,DocValues>> structure. |true or false |false
 |sortMissingFirst sortMissingLast |Control the placement of documents when a sort field is not present. |true or false |false
 |multiValued |If true, indicates that a single document might contain multiple values for this field type. |true or false |false
 |omitNorms |If true, omits the norms associated with this field (this disables length normalization for the field, and saves some memory). *Defaults to true for all primitive (non-analyzed) field types, such as int, float, date, bool, and string.* Only full-text fields or fields that need an index-time boost need norms. |true or false |*
@@ -47,6 +47,6 @@ Fields can have many of the same properties as field types. Properties from the
 |omitPositions |Similar to `omitTermFreqAndPositions` but preserves term frequency information. |true or false |*
 |termVectors termPositions termOffsets termPayloads |These options instruct Solr to maintain full term vectors for each document, optionally including position, offset and payload information for each term occurrence in those vectors. These can be used to accelerate highlighting and other ancillary functionality, but impose a substantial cost in terms of index size. They are not necessary for typical uses of Solr. |true or false |false
 |required |Instructs Solr to reject any attempts to add a document which does not have a value for this field. This property defaults to false. |true or false |false
-|useDocValuesAsStored |If the field has `<<docvalues.adoc#DocValues,docValues>>` enabled, setting this to true would allow the field to be returned as if it were a stored field (even if it has `stored=false`) when matching "`*`" in an <<common-query+parameters.adoc#CommonQueryParameters-Thefl_FieldList_Parameter,fl parameter>>. |true or false |true
+|useDocValuesAsStored |If the field has `<<docvalues.adoc#docvalues,docValues>>` enabled, setting this to true would allow the field to be returned as if it were a stored field (even if it has `stored=false`) when matching "`*`" in an <<common-query-parameters.adoc#CommonQueryParameters-Thefl_FieldList_Parameter,fl parameter>>. |true or false |true
 |large |Large fields are always lazy loaded and will only take up space in the document cache if the actual value is < 512KB. This option requires `stored="true"` and `multiValued="false"`. It's intended for fields that might have very large values so that they don't get cached in memory. |true or false |false
 |===

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/faceting.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/faceting.adoc b/solr/solr-ref-guide/src/faceting.adoc
index e82e9fb..b6a0ebf 100644
--- a/solr/solr-ref-guide/src/faceting.adoc
+++ b/solr/solr-ref-guide/src/faceting.adoc
@@ -210,7 +210,7 @@ This param will cause loading the underlying fields used in faceting to be execu
 [[Faceting-RangeFaceting]]
 == Range Faceting
 
-You can use Range Faceting on any date field or any numeric field that supports range queries. This is particularly useful for stitching together a series of range queries (as facet by query) for things like prices. As of Solr 3.1, Range Faceting is preferred over <<Faceting-DateFacetingParameters,Date Faceting>> (described below).
+You can use Range Faceting on any date field or any numeric field that supports range queries. This is particularly useful for stitching together a series of range queries (as facet by query) for things like prices.
 
 // TODO: Change column width to %autowidth.spread when https://github.com/asciidoctor/asciidoctor-pdf/issues/599 is fixed
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc b/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc
index b2adacb..78f7ef7 100644
--- a/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc
+++ b/solr/solr-ref-guide/src/field-type-definitions-and-properties.adoc
@@ -47,7 +47,6 @@ Field types are defined in `schema.xml`. Each field type is defined between `fie
 
 The implementing class is responsible for making sure the field is handled correctly. In the class names in `schema.xml`, the string `solr` is shorthand for `org.apache.solr.schema` or `org.apache.solr.analysis`. Therefore, `solr.TextField` is really `org.apache.solr.schema.TextField`.
 
-[[FieldTypeDefinitionsandProperties-FieldTypeProperties]]
 == Field Type Properties
 
 The field type `class` determines most of the behavior of a field type, but optional properties can also be defined. For example, the following definition of a date field type defines two properties, `sortMissingLast` and `omitNorms`.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/graph-traversal.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/graph-traversal.adoc b/solr/solr-ref-guide/src/graph-traversal.adoc
index 5a56949..164e0f6 100644
--- a/solr/solr-ref-guide/src/graph-traversal.adoc
+++ b/solr/solr-ref-guide/src/graph-traversal.adoc
@@ -365,7 +365,7 @@ In a nutshell this expression finds the products that most frequently co-occur w
 [[GraphTraversal-UsingthescoreNodesFunctiontoMakeaRecommendation]]
 === Using the scoreNodes Function to Make a Recommendation
 
-This use case builds on the market basket example <<GraphTraversal-CalculateMarketBasketCo-occurance,above>> that calculates which products co-occur most frequently with productID:ABC. The ranked co-occurrence counts provide candidates for a recommendation. The `scoreNodes` function can be used to score the candidates to find the best recommendation.
+This use case builds on the market basket example <<GraphTraversal-CalculateMarketBasketCo-occurrence,above>> that calculates which products co-occur most frequently with productID:ABC. The ranked co-occurrence counts provide candidates for a recommendation. The `scoreNodes` function can be used to score the candidates to find the best recommendation.
 
 Before diving into the syntax of the `scoreNodes` function it's useful to understand why the raw co-occurrence counts may not produce the best recommendation. The reason is that raw co-occurrence counts favor items that occur frequently across all baskets. A better recommendation would find the product that has the most significant relationship with productID ABC. The `scoreNodes` function uses a term frequency-inverse document frequency (TF-IDF) algorithm to find the most significant relationship.
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/implicit-requesthandlers.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/implicit-requesthandlers.adoc b/solr/solr-ref-guide/src/implicit-requesthandlers.adoc
index 2e06c5b..74cea37 100644
--- a/solr/solr-ref-guide/src/implicit-requesthandlers.adoc
+++ b/solr/solr-ref-guide/src/implicit-requesthandlers.adoc
@@ -26,15 +26,15 @@ Solr ships with many out-of-the-box RequestHandlers, which are called implicit b
 |`/debug/dump` |{solr-javadocs}/solr-core/org/apache/solr/handler/DumpRequestHandler.html[DumpRequestHandler] |`_DEBUG_DUMP` |Echo the request contents back to the client.
 |<<exporting-result-sets.adoc#exporting-result-sets,`/export`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/component/SearchHandler.html[SearchHandler] |`_EXPORT` |Export full sorted result sets.
 |<<realtime-get.adoc#realtime-get,`/get`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/RealTimeGetHandler.html[RealTimeGetHandler] |`_GET` |Real-time get: low-latency retrieval of the latest version of a document.
-|<<graph-traversal.adoc#GraphTraversal-GraphTraversal-ExportingGraphMLtoSupportGraphVisualization,`/graph`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/GraphHandler.html[GraphHandler] |`_ADMIN_GRAPH` |Return http://graphml.graphdrawing.org/[GraphML] formatted output from a <<graph-traversal.adoc#graph-traversal,`gather` `Nodes` streaming expression>>.
+|<<graph-traversal.adoc#GraphTraversal-ExportingGraphMLtoSupportGraphVisualization,`/graph`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/GraphHandler.html[GraphHandler] |`_ADMIN_GRAPH` |Return http://graphml.graphdrawing.org/[GraphML] formatted output from a <<graph-traversal.adoc#graph-traversal,`gather` `Nodes` streaming expression>>.
 |<<index-replication.adoc#index-replication,`/replication`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/ReplicationHandler.html[ReplicationHandler] |`_REPLICATION` |Replicate indexes for SolrCloud recovery and Master/Slave index distribution.
 |<<schema-api.adoc#schema-api,`/schema`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/SchemaHandler.html[SchemaHandler] |`_SCHEMA` |Retrieve/modify Solr schema.
-|<<parallel-sql-interface.adoc#sql-Request-Handler,`/sql`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/SQLHandler.html[SQLHandler] |`_SQL` |Front end of the Parallel SQL interface.
-|<<streaming-expressions.adoc#StreamingExpressions-StreamingExpressions-StreamingRequestsandResponses,`/stream`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/StreamHandler.html[StreamHandler] |`_STREAM` |Distributed stream processing.
-|<<the-terms-component.adoc#TheTermsComponent-TheTermsComponent-UsingtheTermsComponentinaRequestHandler,`/terms`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/component/SearchHandler.html[SearchHandler] |`_TERMS` |Return a field's indexed terms and the number of documents containing each term.
+|<<parallel-sql-interface.adoc#sql-request-handler,`/sql`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/SQLHandler.html[SQLHandler] |`_SQL` |Front end of the Parallel SQL interface.
+|<<streaming-expressions.adoc#StreamingExpressions-StreamingRequestsandResponses,`/stream`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/StreamHandler.html[StreamHandler] |`_STREAM` |Distributed stream processing.
+|<<the-terms-component.adoc#TheTermsComponent-UsingtheTermsComponentinaRequestHandler,`/terms`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/component/SearchHandler.html[SearchHandler] |`_TERMS` |Return a field's indexed terms and the number of documents containing each term.
 |<<uploading-data-with-index-handlers.adoc#uploading-data-with-index-handlers,`/update`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/UpdateRequestHandler.html[UpdateRequestHandler] |`_UPDATE` |Add, delete and update indexed documents formatted as SolrXML, CSV, SolrJSON or javabin.
-|<<uploading-data-with-index-handlers.adoc#UploadingDatawithIndexHandlers-UploadingDatawithIndexHandlers-CSVUpdateConveniencePaths,`/update/csv`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/UpdateRequestHandler.html[UpdateRequestHandler] |`_UPDATE_CSV` |Add and update CSV-formatted documents.
-|<<uploading-data-with-index-handlers.adoc#UploadingDatawithIndexHandlers-UploadingDatawithIndexHandlers-CSVUpdateConveniencePaths,`/update/json`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/UpdateRequestHandler.html[UpdateRequestHandler] |`_UPDATE_JSON` |Add, delete and update SolrJSON-formatted documents.
+|<<uploading-data-with-index-handlers.adoc#UploadingDatawithIndexHandlers-CSVUpdateConveniencePaths,`/update/csv`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/UpdateRequestHandler.html[UpdateRequestHandler] |`_UPDATE_CSV` |Add and update CSV-formatted documents.
+|<<uploading-data-with-index-handlers.adoc#UploadingDatawithIndexHandlers-CSVUpdateConveniencePaths,`/update/json`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/UpdateRequestHandler.html[UpdateRequestHandler] |`_UPDATE_JSON` |Add, delete and update SolrJSON-formatted documents.
 |<<transforming-and-indexing-custom-json.adoc#transforming-and-indexing-custom-json,`/update/json/docs`>> |{solr-javadocs}/solr-core/org/apache/solr/handler/UpdateRequestHandler.html[UpdateRequestHandler] |`_UPDATE_JSON_DOCS ` |Add and update custom JSON-formatted documents.
 |===
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/index-replication.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/index-replication.adoc b/solr/solr-ref-guide/src/index-replication.adoc
index e04222a..94c8b6a 100644
--- a/solr/solr-ref-guide/src/index-replication.adoc
+++ b/solr/solr-ref-guide/src/index-replication.adoc
@@ -286,4 +286,4 @@ Copying an optimized index means that the *entire* index will need to be transfe
 
 Optimizing on the master allows for a straight-forward optimization operation. No query slaves need to be taken out of service. The optimized index can be distributed in the background as queries are being normally serviced. The optimization can occur at any time convenient to the application providing index updates.
 
-While optimizing may have some benefits in some situations, a rapidly changing index will not retain those benefits for long, and since optimization is an intensive process, it may be better to consider other options, such as lowering the merge factor (discussed in the section on <<indexconfig-in-solrconfig.adoc#IndexConfiginSolrConfig-mergeFactor,Index Configuration>>).
+While optimizing may have some benefits in some situations, a rapidly changing index will not retain those benefits for long, and since optimization is an intensive process, it may be better to consider other options, such as lowering the merge factor (discussed in the section on <<indexconfig-in-solrconfig.adoc#merge-factors,Index Configuration>>).

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc b/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
index d8c84bb..f77909c 100644
--- a/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
+++ b/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
@@ -66,6 +66,7 @@ Other policies available are the `LogByteSizeMergePolicy` and `LogDocMergePolicy
 </mergePolicyFactory>
 ----
 
+[[merge-factors]]
 === Controlling Segment Sizes: Merge Factors
 
 The most common adjustment users make to the configuration of TieredMergePolicy (or LogByteSizeMergePolicy) are the "merge factors" to change how many segments should be merged at one time.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/language-analysis.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/language-analysis.adoc b/solr/solr-ref-guide/src/language-analysis.adoc
index 34fbe28..ea12424 100644
--- a/solr/solr-ref-guide/src/language-analysis.adoc
+++ b/solr/solr-ref-guide/src/language-analysis.adoc
@@ -1365,7 +1365,7 @@ Solr includes two stemmers for Russian: one in the `solr.SnowballPorterFilterFac
 [[LanguageAnalysis-Scandinavian]]
 === Scandinavian
 
-Scandinavian is a language group spanning three languages <<LanguageAnalysis-Norwegian,Norwegian>>, <<LanguageAnalysis-Swed,Swedish>> and <<LanguageAnalysis-Danish,Danish>> which are very similar.
+Scandinavian is a language group spanning three languages <<LanguageAnalysis-Norwegian,Norwegian>>, <<LanguageAnalysis-Swedish,Swedish>> and <<LanguageAnalysis-Danish,Danish>> which are very similar.
 
 Swedish å, ä, ö are in fact the same letters as Norwegian and Danish å, æ, ø and thus interchangeable when used between these languages. They are however folded differently when people type them on a keyboard lacking these characters.
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/parallel-sql-interface.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/parallel-sql-interface.adoc b/solr/solr-ref-guide/src/parallel-sql-interface.adoc
index b2f8f37..d32f990 100644
--- a/solr/solr-ref-guide/src/parallel-sql-interface.adoc
+++ b/solr/solr-ref-guide/src/parallel-sql-interface.adoc
@@ -45,6 +45,7 @@ As noted, the choice between aggregation modes depends on the cardinality of the
 
 The request handlers used for the SQL interface are configured to load implicitly, meaning there is little to do to start using this feature.
 
+[[sql-request-handler]]
 === /sql Request Handler
 
 The `/sql` handler is the front end of the Parallel SQL interface. All SQL queries are sent to the `/sql` handler to be processed. The handler also coordinates the distributed MapReduce jobs when running `GROUP BY` and `SELECT DISTINCT` queries in `map_reduce` mode. By default the `/sql` handler will choose worker nodes from its own collection to handle the distributed operations. In this default scenario the collection where the `/sql` handler resides acts as the default worker collection for MapReduce queries.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/rule-based-authorization-plugin.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/rule-based-authorization-plugin.adoc b/solr/solr-ref-guide/src/rule-based-authorization-plugin.adoc
index 9ff0999..15604ad3 100644
--- a/solr/solr-ref-guide/src/rule-based-authorization-plugin.adoc
+++ b/solr/solr-ref-guide/src/rule-based-authorization-plugin.adoc
@@ -13,7 +13,7 @@ Once defined through the API, roles are stored in `security.json`.
 [[Rule-BasedAuthorizationPlugin-EnabletheAuthorizationPlugin]]
 == Enable the Authorization Plugin
 
-The plugin must be enabled in `security.json`. This file and where to put it in your system is described in detail in the section <<authentication-and-authorization-plugins.adoc#AuthenticationandAuthorizationPlugins-EnabledPluginswithsecurity.json,Enable Plugins with security.json>>.
+The plugin must be enabled in `security.json`. This file and where to put it in your system is described in detail in the section <<authentication-and-authorization-plugins.adoc#AuthenticationandAuthorizationPlugins-EnablePluginswithsecurity.json,Enable Plugins with security.json>>.
 
 This file has two parts, the `authentication` part and the `authorization` part. The `authentication` part stores information about the class being used for authentication.
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc b/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
index 0e2016a..e6097c5 100644
--- a/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
+++ b/solr/solr-ref-guide/src/running-solr-on-hdfs.adoc
@@ -18,7 +18,7 @@ To use HDFS rather than a local filesystem, you must be using Hadoop 2.x and you
 [[RunningSolronHDFS-StandaloneSolrInstances]]
 === Standalone Solr Instances
 
-For standalone Solr instances, there are a few parameters you should be sure to modify before starting Solr. These can be set in `solrconfig.xml`(more on that <<RunningSolronHDFS-Settings,below>>), or passed to the `bin/solr` script at startup.
+For standalone Solr instances, there are a few parameters you should be sure to modify before starting Solr. These can be set in `solrconfig.xml` (more on that <<RunningSolronHDFS-HdfsDirectoryFactoryParameters,below>>), or passed to the `bin/solr` script at startup.
 
 * You need to use an `HdfsDirectoryFactory` and a data dir of the form `hdfs://host:port/path`
 * You need to specify an UpdateLog location of the form `hdfs://host:port/path`
@@ -34,7 +34,7 @@ bin/solr start -Dsolr.directoryFactory=HdfsDirectoryFactory
      -Dsolr.updatelog=hdfs://host:port/path
 ----
 
-This example will start Solr in standalone mode, using the defined JVM properties (explained in more detail <<RunningSolronHDFS-Settings,below>>).
+This example will start Solr in standalone mode, using the defined JVM properties (explained in more detail <<RunningSolronHDFS-HdfsDirectoryFactoryParameters,below>>).
 
 [[RunningSolronHDFS-SolrCloudInstances]]
 === SolrCloud Instances

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/solr-glossary.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solr-glossary.adoc b/solr/solr-ref-guide/src/solr-glossary.adoc
index d95ffb9..334dbec 100644
--- a/solr/solr-ref-guide/src/solr-glossary.adoc
+++ b/solr/solr-ref-guide/src/solr-glossary.adoc
@@ -157,7 +157,7 @@ The Apache Solr configuration file. Defines indexing options, RequestHandlers, h
 The ability to suggest alternative spellings of search terms to a user, as a check against spelling errors causing few or zero results.
 
 [[stopwords]]Stopwords::
-Generally, words that have little meaning to a user's search but which may have been entered as part of a <<Naturallanguagequery,natural language>> query. Stopwords are generally very small pronouns, conjunctions and prepositions (such as, "the", "with", or "and")
+Generally, words that have little meaning to a user's search but which may have been entered as part of a <<naturallanguagequery,natural language>> query. Stopwords are generally very small pronouns, conjunctions and prepositions (such as "the", "with", or "and")
 
 [[suggesterdef]]<<suggester.adoc#suggester,Suggester>>::
 Functionality in Solr that provides the ability to suggest possible query terms to users as they type.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/spell-checking.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/spell-checking.adoc b/solr/solr-ref-guide/src/spell-checking.adoc
index 241d8d1..e6f8618 100644
--- a/solr/solr-ref-guide/src/spell-checking.adoc
+++ b/solr/solr-ref-guide/src/spell-checking.adoc
@@ -185,7 +185,7 @@ The SpellCheck component accepts the parameters described in the table below.
 |<<SpellChecking-Thespellcheck.alternativeTermCountParameter,spellcheck.alternativeTermCount>> |The count of suggestions to return for each query term existing in the index and/or dictionary.
 |<<SpellChecking-Thespellcheck.reloadParameter,spellcheck.reload>> |Reloads the spellchecker.
 |<<SpellChecking-Thespellcheck.accuracyParameter,spellcheck.accuracy>> |Specifies an accuracy value to help decide whether a result is worthwhile.
-|<<SpellChecking-Thespellcheck.%3CDICT_NAME%3E.keyParameter,spellcheck.<DICT_NAME>.key>> |Specifies a key/value pair for the implementation handling a given dictionary.
+|<<spellcheck_DICT_NAME,spellcheck.<DICT_NAME>.key>> |Specifies a key/value pair for the implementation handling a given dictionary.
 |===
 
 [[SpellChecking-ThespellcheckParameter]]
@@ -295,7 +295,7 @@ Specifies an accuracy value to be used by the spell checking implementation to d
 
 // OLD_CONFLUENCE_ID: SpellChecking-Thespellcheck.<DICT_NAME>.keyParameter
 
-[[SpellChecking-Thespellcheck._DICT_NAME_.keyParameter]]
+[[spellcheck_DICT_NAME]]
 === The `spellcheck.<DICT_NAME>.key` Parameter
 
 Specifies a key/value pair for the implementation handling a given dictionary. The value that is passed through is just `key=value` (`spellcheck.<DICT_NAME>.` is stripped off).

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/streaming-expressions.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/streaming-expressions.adoc b/solr/solr-ref-guide/src/streaming-expressions.adoc
index b5b1326..6dbf77f 100644
--- a/solr/solr-ref-guide/src/streaming-expressions.adoc
+++ b/solr/solr-ref-guide/src/streaming-expressions.adoc
@@ -562,7 +562,7 @@ Stream decorators wrap other stream functions or perform operations on the strea
 [[StreamingExpressions-classify]]
 === classify
 
-The `classify` function classifies tuples using a logistic regression text classification model. It was designed specifically to work with models trained using the <<StreamingExpressions-tr,train function>>. The `classify` function uses the <<StreamingExpressions-m,model function>> to retrieve a stored model and then scores a stream of tuples using the model. The tuples read by the classifier must contain a text field that can be used for classification. The classify function uses a Lucene analyzer to extract the features from the text so the model can be applied. By default the `classify` function looks for the analyzer using the name of text field in the tuple. If the Solr schema on the worker node does not contain this field, the analyzer can be looked up in another field by specifying the `analyzerField` parameter.
+The `classify` function classifies tuples using a logistic regression text classification model. It was designed specifically to work with models trained using the <<StreamingExpressions-train,train function>>. The `classify` function uses the <<StreamingExpressions-model,model function>> to retrieve a stored model and then scores a stream of tuples using the model. The tuples read by the classifier must contain a text field that can be used for classification. The classify function uses a Lucene analyzer to extract the features from the text so the model can be applied. By default the `classify` function looks for the analyzer using the name of text field in the tuple. If the Solr schema on the worker node does not contain this field, the analyzer can be looked up in another field by specifying the `analyzerField` parameter.
 
 Each tuple that is classified is assigned two scores:
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/10152ba8/solr/solr-ref-guide/src/upgrading-solr.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/upgrading-solr.adoc b/solr/solr-ref-guide/src/upgrading-solr.adoc
index d4bb87f..6ec78f1 100644
--- a/solr/solr-ref-guide/src/upgrading-solr.adoc
+++ b/solr/solr-ref-guide/src/upgrading-solr.adoc
@@ -13,7 +13,7 @@ If you are already using Solr 6.5, Solr 6.6 should not present any major problem
 == Upgrading from earlier 6.x versions
 
 * If you use historical dates, specifically on or before the year 1582, you should re-index after upgrading to this version.
-* If you use the <<faceted-search.adoc#faceted-search,JSON Facet API>> (json.facet) with `method=stream`, you must now set `sort='index asc'` to get the streaming behavior; otherwise it won't stream. Reminder: "method" is a hint that doesn't change defaults of other parameters.
+* If you use the JSON Facet API (json.facet) with `method=stream`, you must now set `sort='index asc'` to get the streaming behavior; otherwise it won't stream. Reminder: "method" is a hint that doesn't change defaults of other parameters.
 * If you use the JSON Facet API (json.facet) to facet on a numeric field and if you use `mincount=0` or if you set the prefix, then you will now get an error as these options are incompatible with numeric faceting.
 * Solr's logging verbosity at the INFO level has been greatly reduced, and you may need to update the log configs to use the DEBUG level to see all the logging messages you used to see at INFO level before.
 * We are no longer backing up `solr.log` and `solr_gc.log` files in date-stamped copies forever. If you relied on the `solr_log_<date>` or `solr_gc_log_<date>` being in the logs folder that will no longer be the case. See the section <<configuring-logging.adoc#configuring-logging,Configuring Logging>> for details on how log rotation works as of Solr 6.3.