Posted to commits@lucene.apache.org by da...@apache.org on 2018/09/14 03:30:18 UTC

[01/43] lucene-solr:jira/http2: [LUCENE-8343] introduced weight 0 check and positional coefficient scaling + tests

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/http2 802f36786 -> 9c65fe4fe


[LUCENE-8343] introduced weight 0 check and positional coefficient scaling + tests


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e83e8ee1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e83e8ee1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e83e8ee1

Branch: refs/heads/jira/http2
Commit: e83e8ee1a42388606fffd10330ed1aeec9518098
Parents: 1d33130
Author: Alessandro Benedetti <a....@sease.io>
Authored: Fri Jun 1 12:52:41 2018 +0100
Committer: Alessandro Benedetti <a....@sease.io>
Committed: Fri Jun 1 12:52:41 2018 +0100

----------------------------------------------------------------------
 .../analyzing/BlendedInfixSuggester.java        |  7 +-
 .../analyzing/BlendedInfixSuggesterTest.java    | 79 +++++++++++++-------
 2 files changed, 58 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e83e8ee1/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
index 413d401..dc65f7a 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
@@ -224,8 +224,11 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
       } else {
         coefficient = createCoefficient(searcher, fd.doc, matchedTokens, prefixToken);
       }
-
-      long score = (long) (weight * coefficient);
+      if (weight == 0) {
+        weight = 1;
+      }
+      long scaledCoefficient = (long) (coefficient * 10);
+      long score = weight * scaledCoefficient;
 
       LookupResult result;
       if (doHighlight) {
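
For context on the fix: with the old "long score = (long) (weight * coefficient);", a small weight truncated distinct positional coefficients to the same long (and a weight of 0 zeroed every score), so suggestions could no longer be ranked by match position. The new code guards weight == 0 and keeps one decimal of the coefficient by scaling it by 10 before truncating. A minimal standalone sketch, with illustrative values rather than anything computed by the suggester:

    // Unitary weight and POSITION_LINEAR coefficients 1 - 0.10 * p for positions p = 2 and p = 3.
    long weight = 1;
    double coeffPos2 = 1 - 0.10 * 2;   // ~0.8
    double coeffPos3 = 1 - 0.10 * 3;   // ~0.7

    long oldPos2 = (long) (weight * coeffPos2);         // 0 -- old formula collapses both
    long oldPos3 = (long) (weight * coeffPos3);         // 0 -- positions to the same score
    long newPos2 = weight * (long) (coeffPos2 * 10);    // 8 -- scaled coefficient keeps the
    long newPos3 = weight * (long) (coeffPos3 * 10);    // 7 -- positional ordering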

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e83e8ee1/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
index ace4467..1e5a5da 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
@@ -44,22 +44,44 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
    * of the matching term.
    */
   public void testBlendedSort() throws IOException {
-
     BytesRef payload = new BytesRef("star");
-
     Input keys[] = new Input[]{
         new Input("star wars: episode v - the empire strikes back", 8, payload)
     };
+    BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
 
-    Path tempDir = createTempDir("BlendedInfixSuggesterTest");
+    assertSuggestionsRanking(payload, suggester);
+  }
 
-    Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
-    BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
-                                                                AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
-                                                                BlendedInfixSuggester.BlenderType.POSITION_LINEAR,
-                                                                BlendedInfixSuggester.DEFAULT_NUM_FACTOR, false);
-    suggester.build(new InputArrayIterator(keys));
+  /**
+   * Test to validate the suggestions ranking according to the position coefficient,
+   * even if the weight associated with the suggestion is unitary.
+   */
+  public void testBlendedSort_fieldWeightUnitary_shouldRankSuggestionsByPositionMatch() throws IOException {
+    BytesRef payload = new BytesRef("star");
+    Input keys[] = new Input[]{
+        new Input("star wars: episode v - the empire strikes back", 1, payload)
+    };
+    BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
+
+    assertSuggestionsRanking(payload, suggester);
+  }
 
+  /**
+   * Test to validate the suggestions ranking according to the position coefficient,
+   * even if the weight associated with the suggestion is zero.
+   */
+  public void testBlendedSort_fieldWeightZero_shouldRankSuggestionsByPositionMatch() throws IOException {
+    BytesRef payload = new BytesRef("star");
+    Input keys[] = new Input[]{
+        new Input("star wars: episode v - the empire strikes back", 0, payload)
+    };
+    BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
+
+    assertSuggestionsRanking(payload, suggester);
+  }
+
+  private void assertSuggestionsRanking(BytesRef payload, BlendedInfixSuggester suggester) throws IOException {
     // we query for star wars and check that the weight
     // is smaller when we search for tokens that are far from the beginning
 
@@ -78,6 +100,18 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
     suggester.close();
   }
 
+  private BlendedInfixSuggester getBlendedInfixSuggester(Input[] keys) throws IOException {
+    Path tempDir = createTempDir("BlendedInfixSuggesterTest");
+
+    Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
+    BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
+        AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
+        BlendedInfixSuggester.BlenderType.POSITION_LINEAR,
+        BlendedInfixSuggester.DEFAULT_NUM_FACTOR, false);
+    suggester.build(new InputArrayIterator(keys));
+    return suggester;
+  }
+
   /**
    * Verify the different flavours of the blender types
    */
@@ -97,9 +131,9 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
     BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a);
     suggester.build(new InputArrayIterator(keys));
 
-    assertEquals(w, getInResults(suggester, "top", pl, 1));
-    assertEquals((int) (w * (1 - 0.10 * 2)), getInResults(suggester, "the", pl, 1));
-    assertEquals((int) (w * (1 - 0.10 * 3)), getInResults(suggester, "lake", pl, 1));
+    assertEquals(10 * w, getInResults(suggester, "top", pl, 1));
+    assertEquals(w * (long) (10 * (1 - 0.10 * 2)), getInResults(suggester, "the", pl, 1));
+    assertEquals(w * (long) (10 * (1 - 0.10 * 3)), getInResults(suggester, "lake", pl, 1));
 
     suggester.close();
 
@@ -109,9 +143,9 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
                                           BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1, false);
     suggester.build(new InputArrayIterator(keys));
 
-    assertEquals(w, getInResults(suggester, "top", pl, 1));
-    assertEquals((int) (w * 1 / (1 + 2)), getInResults(suggester, "the", pl, 1));
-    assertEquals((int) (w * 1 / (1 + 3)), getInResults(suggester, "lake", pl, 1));
+    assertEquals(10 * w, getInResults(suggester, "top", pl, 1));
+    assertEquals(w * (long) (10 * 1 / (1 + 2)), getInResults(suggester, "the", pl, 1));
+    assertEquals(w * (long) (10 * 1 / (1 + 3)), getInResults(suggester, "lake", pl, 1));
     suggester.close();
 
     // BlenderType.EXPONENTIAL_RECIPROCAL is using 1/(pow(1+p, exponent)) * w where w is weight and p the position of the word
@@ -121,9 +155,9 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
 
     suggester.build(new InputArrayIterator(keys));
 
-    assertEquals(w, getInResults(suggester, "top", pl, 1));
-    assertEquals((int) (w * 1 / (Math.pow(1 + 2, 4.0))), getInResults(suggester, "the", pl, 1));
-    assertEquals((int) (w * 1 / (Math.pow(1 + 3, 4.0))), getInResults(suggester, "lake", pl, 1));
+    assertEquals(10 * w, getInResults(suggester, "top", pl, 1));
+    assertEquals(w * (long) (10 * 1 / (Math.pow(1 + 2, 4.0))), getInResults(suggester, "the", pl, 1));
+    assertEquals(w * (long) (10 * 1 / (Math.pow(1 + 3, 4.0))), getInResults(suggester, "lake", pl, 1));
 
     suggester.close();
   }
@@ -195,14 +229,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
         new Input("top of the lake", 8, payload)
     };
 
-    Path tempDir = createTempDir("BlendedInfixSuggesterTest");
-
-    Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
-    BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
-                                                                AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
-                                                                BlendedInfixSuggester.BlenderType.POSITION_LINEAR,
-                                                                BlendedInfixSuggester.DEFAULT_NUM_FACTOR, false);
-    suggester.build(new InputArrayIterator(keys));
+    BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
 
     getInResults(suggester, "of ", payload, 1);
     getInResults(suggester, "the ", payload, 1);
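
To make the updated assertions above easier to read, here is a worked example for a match at position p = 2 (the query "the"). The test's actual w is defined outside the quoted hunks; w = 10 below is assumed purely for illustration:

    long w = 10;  // assumed value, for illustration only

    // POSITION_LINEAR: coefficient 1 - 0.10 * p, scaled by 10 and truncated.
    long linear = w * (long) (10 * (1 - 0.10 * 2));                 // w * 8 = 80

    // POSITION_RECIPROCAL: coefficient 1 / (1 + p). Note that 10 * 1 / (1 + 2) is integer
    // division (10 / 3 = 3), which here matches the suggester's truncation of coefficient * 10.
    long reciprocal = w * (long) (10 * 1 / (1 + 2));                // w * 3 = 30

    // EXPONENTIAL_RECIPROCAL: coefficient 1 / pow(1 + p, 4). The scaled coefficient
    // truncates to zero, so the score collapses to 0 at this position.
    long exponential = w * (long) (10 * 1 / Math.pow(1 + 2, 4.0));  // w * 0 = 0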


[23/43] lucene-solr:jira/http2: SOLR-6387: Remove/revert dated assertFalse in ExtractingRequestHandlerTest Since this problem does not appear anymore in the latest JVMs.

Posted by da...@apache.org.
SOLR-6387: Remove/revert dated assertFalse in ExtractingRequestHandlerTest
Since this problem does not appear anymore in the latest JVMs.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a537aa20
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a537aa20
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a537aa20

Branch: refs/heads/jira/http2
Commit: a537aa20c104234e19407087202bec9e6c5c1a21
Parents: 3a71bf3
Author: David Smiley <ds...@apache.org>
Authored: Mon Sep 10 11:33:15 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Mon Sep 10 11:33:15 2018 -0400

----------------------------------------------------------------------
 .../solr/handler/extraction/ExtractingRequestHandlerTest.java     | 3 ---
 1 file changed, 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a537aa20/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
index dc84b51..3241210 100644
--- a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
+++ b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
@@ -17,7 +17,6 @@
 package org.apache.solr.handler.extraction;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Locale;
 
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.SolrException;
@@ -42,8 +41,6 @@ public class ExtractingRequestHandlerTest extends SolrTestCaseJ4 {
 
   @BeforeClass
   public static void beforeClass() throws Exception {
-    assumeFalse("This test fails on UNIX with Turkish default locale (https://issues.apache.org/jira/browse/SOLR-6387)",
-        new Locale("tr").getLanguage().equals(Locale.getDefault().getLanguage()));
     initCore("solrconfig.xml", "schema.xml", getFile("extraction/solr").getAbsolutePath());
   }
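
For background on why the assume existed at all: under a Turkish default locale, Java's locale-sensitive case conversion does not round-trip ASCII 'i'/'I' (Turkish has dotted and dotless i), which is the classic failure mode SOLR-6387 tracked. A minimal illustration of the class of problem, not taken from the test itself:

    import java.util.Locale;

    public class TurkishLocaleDemo {
      public static void main(String[] args) {
        Locale tr = new Locale("tr");
        // 'I' lower-cases to dotless 'ı' and 'i' upper-cases to dotted 'İ' in Turkish,
        // so ASCII-only comparisons break after locale-sensitive casing.
        System.out.println("TITLE".toLowerCase(tr));           // tıtle
        System.out.println("title".toUpperCase(tr));           // TİTLE
        System.out.println("TITLE".toLowerCase(Locale.ROOT));  // title (locale-independent)
      }
    }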
 


[33/43] lucene-solr:jira/http2: SOLR-11836: add all contributors for the patch

Posted by da...@apache.org.
SOLR-11836: add all contributors for the patch


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2b553f03
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2b553f03
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2b553f03

Branch: refs/heads/jira/http2
Commit: 2b553f03bec74f65604334b5047a9ce68dd5d344
Parents: d614386
Author: Varun Thacker <va...@apache.org>
Authored: Tue Sep 11 12:37:35 2018 -0700
Committer: Varun Thacker <va...@apache.org>
Committed: Tue Sep 11 12:37:50 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b553f03/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c3ab65b..b0da693 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -340,7 +340,7 @@ Bug Fixes
 * SOLR-12733: SolrMetricReporterTest failure (Erick Erickson, David Smiley)
 
 * SOLR-11836: FacetStream works with bucketSizeLimit of -1 which will fetch all the buckets.
-  (Alfonso Muñoz-Pomer Fuentes via Varun Thacker)
+  (Alfonso Muñoz-Pomer Fuentes, Amrit Sarkar via Varun Thacker)
 
 Optimizations
 ----------------------


[34/43] lucene-solr:jira/http2: LUCENE-5143: Fix smoketester, fix RM PGP key check, fix solr DOAP file, add CHANGES entry Remove unused/stale 'copy-to-stage' and '-dist-keys' targets from ant build

Posted by da...@apache.org.
LUCENE-5143: Fix smoketester, fix RM PGP key check, fix solr DOAP file, add CHANGES entry
Remove unused/stale 'copy-to-stage' and '-dist-keys' targets from ant build


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/5b96f89d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/5b96f89d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/5b96f89d

Branch: refs/heads/jira/http2
Commit: 5b96f89d2b038bff2ed3351887a87108f7cc6ea3
Parents: 2b553f0
Author: Jan Høydahl <ja...@apache.org>
Authored: Tue Sep 11 22:27:14 2018 +0200
Committer: Jan Høydahl <ja...@apache.org>
Committed: Tue Sep 11 22:39:19 2018 +0200

----------------------------------------------------------------------
 dev-tools/doap/solr.rdf                  |  2 +-
 dev-tools/scripts/buildAndPushRelease.py | 35 ++++++++++++++------------
 dev-tools/scripts/smokeTestRelease.py    | 36 ++++++++++++++++-----------
 lucene/CHANGES.txt                       |  6 +++++
 lucene/build.xml                         | 15 +----------
 lucene/common-build.xml                  | 26 -------------------
 solr/build.xml                           | 12 +--------
 7 files changed, 49 insertions(+), 83 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5b96f89d/dev-tools/doap/solr.rdf
----------------------------------------------------------------------
diff --git a/dev-tools/doap/solr.rdf b/dev-tools/doap/solr.rdf
index 108eee3..c739fb7 100644
--- a/dev-tools/doap/solr.rdf
+++ b/dev-tools/doap/solr.rdf
@@ -71,7 +71,7 @@
       <Version>
         <name>solr-7.4.0</name>
         <created>2018-06-27</created>
-        <revision>7.4.1</revision>
+        <revision>7.4.0</revision>
       </Version>
     </release>
     <release>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5b96f89d/dev-tools/scripts/buildAndPushRelease.py
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/buildAndPushRelease.py b/dev-tools/scripts/buildAndPushRelease.py
index 3694f1a..5a8f5cc 100644
--- a/dev-tools/scripts/buildAndPushRelease.py
+++ b/dev-tools/scripts/buildAndPushRelease.py
@@ -78,9 +78,12 @@ def getGitRev():
     raise RuntimeError('git clone is dirty:\n\n%s' % status)
   branch = os.popen('git rev-parse --abbrev-ref HEAD').read().strip()
   command = 'git log origin/%s..' % branch
-  unpushedCommits = os.popen(command).read().strip()
-  if len(unpushedCommits) > 0:
-    raise RuntimeError('There are unpushed commits - "%s" output is:\n\n%s' % (command, unpushedCommits))
+  p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  stdout, stderr = p.communicate()
+  if len(stdout.strip()) > 0:
+    raise RuntimeError('There are unpushed commits - "%s" output is:\n\n%s' % (command, stdout.decode('utf-8')))
+  if len(stderr.strip()) > 0:
+    raise RuntimeError('Command "%s" failed:\n\n%s' % (command, stderr.decode('utf-8')))
 
   print('  git clone is clean')
   return os.popen('git rev-parse HEAD').read().strip()
@@ -271,14 +274,6 @@ def parse_config():
   config.version = read_version(config.root)
   print('Building version: %s' % config.version)
 
-  if config.sign:
-    sys.stdout.flush()
-    import getpass
-    config.key_id = config.sign
-    config.key_password = getpass.getpass('Enter GPG keystore password: ')
-  else:
-    config.gpg_password = None
-
   return config
 
 def check_cmdline_tools():  # Fail fast if there are cmdline tool problems
@@ -313,15 +308,15 @@ def check_key_in_keys(gpgKeyID, local_keys):
     if len(gpgKeyID) > 40:
       gpgKeyID = gpgKeyID.replace(" ", "")
     if len(gpgKeyID) == 8:
-      re_to_match = r"^pub\s+\d+[DR]/%s " % gpgKeyID
+      gpgKeyID8Char = "%s %s" % (gpgKeyID[0:4], gpgKeyID[4:8])
+      re_to_match = r"^pub .*\n\s+\w{4} \w{4} \w{4} \w{4} \w{4}  \w{4} \w{4} \w{4} %s" % gpgKeyID8Char
     elif len(gpgKeyID) == 40:
       gpgKeyID40Char = "%s %s %s %s %s  %s %s %s %s %s" % \
                        (gpgKeyID[0:4], gpgKeyID[4:8], gpgKeyID[8:12], gpgKeyID[12:16], gpgKeyID[16:20],
                        gpgKeyID[20:24], gpgKeyID[24:28], gpgKeyID[28:32], gpgKeyID[32:36], gpgKeyID[36:])
-      print("Generated id string %s" % gpgKeyID40Char)
-      re_to_match = r"^\s+Key fingerprint = %s$" % gpgKeyID40Char
+      re_to_match = r"^pub .*\n\s+%s" % gpgKeyID40Char
     else:
-      print('Invalid gpg key id format. Must be 8 byte short ID or 40 byte fingerprint, with or without 0x prefix.')
+      print('Invalid gpg key id format. Must be 8 byte short ID or 40 byte fingerprint, with or without 0x prefix, no spaces.')
       exit(2)
     if re.search(re_to_match, keysFileText, re.MULTILINE):
       print('    Found key %s in KEYS file at %s' % (gpgKeyID, keysFileLocation))
@@ -337,7 +332,15 @@ def main():
 
   c = parse_config()
 
-  check_key_in_keys(c.key_id, c.local_keys)
+  if c.sign:
+    sys.stdout.flush()
+    c.key_id = c.sign
+    check_key_in_keys(c.key_id, c.local_keys)
+    import getpass
+    c.key_password = getpass.getpass('Enter GPG keystore password: ')
+  else:
+    c.key_id = None
+    c.key_password = None
   
   if c.prepare:
     rev = prepare(c.root, c.version, c.key_id, c.key_password)
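
A note on the check_key_in_keys() regexes above: they match fingerprint lines in the KEYS file, i.e., a "pub" line followed by ten 4-character groups with a double space after the fifth, and the 8-character short ID corresponds to the last two groups. A small sketch of that grouping, written in Java for illustration (the script itself is Python) with a made-up fingerprint:

    String fp = "0123456789ABCDEF0123456789ABCDEF01234567";  // hypothetical 40-char fingerprint
    StringBuilder grouped = new StringBuilder();
    for (int i = 0; i < fp.length(); i += 4) {
      if (i > 0) grouped.append(i == 20 ? "  " : " ");  // double space after the fifth group
      grouped.append(fp, i, i + 4);
    }
    // grouped -> "0123 4567 89AB CDEF 0123  4567 89AB CDEF 0123 4567"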

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5b96f89d/dev-tools/scripts/smokeTestRelease.py
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py
index f68a9b1..af26ae1 100644
--- a/dev-tools/scripts/smokeTestRelease.py
+++ b/dev-tools/scripts/smokeTestRelease.py
@@ -285,23 +285,22 @@ def checkAllJARs(topDir, project, gitRevision, version, tmpDir, baseURL):
                                % (fullPath, luceneDistFilenames[jarFilename]))
 
 
-def checkSigs(project, urlString, version, tmpDir, isSigned):
+def checkSigs(project, urlString, version, tmpDir, isSigned, local_keys):
 
   print('  test basics...')
   ents = getDirEntries(urlString)
   artifact = None
-  keysURL = None
   changesURL = None
   mavenURL = None
   expectedSigs = []
   if isSigned:
     expectedSigs.append('asc')
   expectedSigs.extend(['sha1', 'sha512'])
-  
+
   artifacts = []
   for text, subURL in ents:
     if text == 'KEYS':
-      keysURL = subURL
+      raise RuntimeError('%s: release dir should not contain a KEYS file - only toplevel /dist/lucene/KEYS is used' % project)
     elif text == 'maven/':
       mavenURL = subURL
     elif text.startswith('changes'):
@@ -346,14 +345,16 @@ def checkSigs(project, urlString, version, tmpDir, isSigned):
   if expected != actual:
     raise RuntimeError('%s: wrong artifacts: expected %s but got %s' % (project, expected, actual))
                 
-  if keysURL is None:
-    raise RuntimeError('%s is missing KEYS' % project)
-
   print('  get KEYS')
-  download('%s.KEYS' % project, keysURL, tmpDir)
-
-  keysFile = '%s/%s.KEYS' % (tmpDir, project)
-
+  if local_keys is not None:
+    print("    Using local KEYS file %s" % local_keys)
+    keysFile = local_keys
+  else:
+    keysFileURL = "https://archive.apache.org/dist/lucene/KEYS"
+    print("    Downloading online KEYS file %s" % keysFileURL)
+    download('KEYS', keysFileURL, tmpDir)
+    keysFile = '%s/KEYS' % (tmpDir)
+  
   # Set up clean gpg world; import keys file:
   gpgHomeDir = '%s/%s.gpg' % (tmpDir, project)
   if os.path.exists(gpgHomeDir):
@@ -1291,6 +1292,8 @@ def parse_config():
                       help='Temporary directory to test inside, defaults to /tmp/smoke_lucene_$version_$revision')
   parser.add_argument('--not-signed', dest='is_signed', action='store_false', default=True,
                       help='Indicates the release is not signed')
+  parser.add_argument('--local-keys', metavar='PATH',
+                      help='Uses local KEYS file instead of fetching from https://archive.apache.org/dist/lucene/KEYS')
   parser.add_argument('--revision',
                       help='GIT revision number that release was built with, defaults to that in URL')
   parser.add_argument('--version', metavar='X.Y.Z(-ALPHA|-BETA)?',
@@ -1318,6 +1321,9 @@ def parse_config():
     c.revision = revision_match.group(1)
     print('Revision: %s' % c.revision)
 
+  if c.local_keys is not None and not os.path.exists(c.local_keys):
+    parser.error('Local KEYS file "%s" not found' % c.local_keys)
+
   c.java = make_java_config(parser, c.test_java9)
 
   if c.tmp_dir:
@@ -1462,9 +1468,9 @@ def main():
     raise RuntimeError('smokeTestRelease.py for %s.X is incompatible with a %s release.' % (scriptVersion, c.version))
 
   print('NOTE: output encoding is %s' % sys.stdout.encoding)
-  smokeTest(c.java, c.url, c.revision, c.version, c.tmp_dir, c.is_signed, ' '.join(c.test_args))
+  smokeTest(c.java, c.url, c.revision, c.version, c.tmp_dir, c.is_signed, c.local_keys, ' '.join(c.test_args))
 
-def smokeTest(java, baseURL, gitRevision, version, tmpDir, isSigned, testArgs):
+def smokeTest(java, baseURL, gitRevision, version, tmpDir, isSigned, local_keys, testArgs):
 
   startTime = datetime.datetime.now()
 
@@ -1500,14 +1506,14 @@ def smokeTest(java, baseURL, gitRevision, version, tmpDir, isSigned, testArgs):
 
   print()
   print('Test Lucene...')
-  checkSigs('lucene', lucenePath, version, tmpDir, isSigned)
+  checkSigs('lucene', lucenePath, version, tmpDir, isSigned, local_keys)
   for artifact in ('lucene-%s.tgz' % version, 'lucene-%s.zip' % version):
     unpackAndVerify(java, 'lucene', tmpDir, artifact, gitRevision, version, testArgs, baseURL)
   unpackAndVerify(java, 'lucene', tmpDir, 'lucene-%s-src.tgz' % version, gitRevision, version, testArgs, baseURL)
 
   print()
   print('Test Solr...')
-  checkSigs('solr', solrPath, version, tmpDir, isSigned)
+  checkSigs('solr', solrPath, version, tmpDir, isSigned, local_keys)
   for artifact in ('solr-%s.tgz' % version, 'solr-%s.zip' % version):
     unpackAndVerify(java, 'solr', tmpDir, artifact, gitRevision, version, testArgs, baseURL)
   solrSrcUnpackPath = unpackAndVerify(java, 'solr', tmpDir, 'solr-%s-src.tgz' % version,
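
With the new --local-keys option, the smoke tester reads a KEYS file from disk instead of downloading https://archive.apache.org/dist/lucene/KEYS. An illustrative invocation (the URL and path are placeholders; other arguments follow the script's usual usage):

    python3 -u dev-tools/scripts/smokeTestRelease.py --local-keys /path/to/KEYS https://dist.apache.org/repos/dist/dev/lucene/lucene-solr-7.5.0-RC1/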

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5b96f89d/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 3022a9a..045a1fa 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -313,6 +313,12 @@ Improvements
   number of dimensions is bigger than 1. It improves performance when there is
   correlation between the dimensions, for example ranges. (Ignacio Vera, Adrien Grand)
 
+Build
+
+* LUCENE-5143: Stop publishing KEYS file with each version, use topmost lucene/KEYS file only. 
+  The buildAndPushRelease.py script validates that the RM's PGP key is in the KEYS file.
+  Remove unused 'copy-to-stage' and '-dist-keys' targets from ant build. (janhoy)
+
 Other:
 
 * LUCENE-8485: Update randomizedtesting to version 2.6.4. (Dawid Weiss)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5b96f89d/lucene/build.xml
----------------------------------------------------------------------
diff --git a/lucene/build.xml b/lucene/build.xml
index b6131af..3c1439c 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -387,7 +387,7 @@
   <!-- ================================================================== -->
   <target name="dist-src" depends="package-tgz-src"/>
 
-  <target name="dist-all" depends="dist, dist-src, -dist-changes, -dist-keys"/>
+  <target name="dist-all" depends="dist, dist-src, -dist-changes"/>
 
   <!-- copy changes/ to the release folder -->
   <target name="-dist-changes">
@@ -396,21 +396,8 @@
    </copy>
   </target>
 
-  <!-- copy KEYS to the release folder -->
-  <target name="-dist-keys">
-   <get src="http://home.apache.org/keys/group/lucene.asc" 
-        dest="${dist.dir}/KEYS"/>
-  </target>
-
-  <target name="copy-to-stage">
-    <copy-to-stage-macro artifacts.dir="${dist.dir}"/>
-  </target>
-
   <target name="prepare-release-no-sign" depends="clean, dist-all, generate-maven-artifacts"/>
   <target name="prepare-release" depends="prepare-release-no-sign, sign-artifacts"/>
-  <target name="stage" depends="prepare-release, copy-to-stage">
-
-  </target>
 
   <target name="-dist-maven" depends="install-maven-tasks">
     <sequential>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5b96f89d/lucene/common-build.xml
----------------------------------------------------------------------
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 68e17da..9fbddc3 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -2380,32 +2380,6 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
     </sequential>
   </macrodef>
 
-  <property name="rc" value="rc0"/>
-  <property name="remote.staging.dir" value="public_html/staging_area/${rc}/${version}"/>
-  <property name="keyfile" value="${user.home}/.ssh/id_rsa"/>
-  <property name="scp.user" value="${user.name}"/>
-  <!--keys.dir is the location of the https://svn.apache.org/repos/asf/lucene/java/dist/ directory-->
-  <property name="keys.dir" value="${common.dir}/../../dist"/>
-  <macrodef name="copy-to-stage-macro">
-    <attribute name="artifacts.dir"/>
-    <sequential>
-      <sshexec host="home.apache.org"
-               username="${scp.user}"
-               keyfile="${keyfile}"
-               command="mkdir -p ${remote.staging.dir}"/>
-      <echo>Uploading artifacts to ${scp.user}@home.apache.org:${remote.staging.dir}</echo>
-      <scp todir="${scp.user}@home.apache.org:${remote.staging.dir}"
-           username="${scp.user}"
-           keyfile="${keyfile}"
-           verbose="true">
-        <fileset dir="${artifacts.dir}"/>
-        <fileset dir="${keys.dir}">
-          <include name="KEYS"/>
-        </fileset>
-      </scp>
-    </sequential>
-  </macrodef>
-
   <!-- JFlex task -->
   <target name="-install-jflex" unless="jflex.loaded" depends="ivy-availability-check,ivy-configure">
     <ivy:cachepath organisation="de.jflex" module="jflex" revision="1.6.0"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5b96f89d/solr/build.xml
----------------------------------------------------------------------
diff --git a/solr/build.xml b/solr/build.xml
index cb4ed75..7ed40bb 100644
--- a/solr/build.xml
+++ b/solr/build.xml
@@ -443,10 +443,6 @@
   <!-- ===================== DISTRIBUTION-RELATED TASKS ======================== -->
   <!-- ========================================================================= -->
  
-  <target name="copy-to-stage">
-    <copy-to-stage-macro artifacts.dir="${package.dir}"/>
-  </target>
-  
   <target name="dist"
           description="Creates the Solr distribution files."
           depends="dist-solrj, dist-core, dist-test-framework, dist-contrib" />
@@ -467,7 +463,7 @@
   <target name="prepare-release" depends="prepare-release-no-sign, sign-artifacts"/>
  
   <!-- make a distribution -->
-  <target name="package" depends="package-src-tgz,create-package,documentation,-dist-changes,-dist-keys"/>
+  <target name="package" depends="package-src-tgz,create-package,documentation,-dist-changes"/>
 
   <!-- copy changes/ to the release folder -->
   <target name="-dist-changes">
@@ -476,12 +472,6 @@
    </copy>
   </target>
 
-  <!-- copy KEYS to the release folder -->
-  <target name="-dist-keys">
-   <get src="http://home.apache.org/keys/group/lucene.asc" 
-        dest="${package.dir}/KEYS"/>
-  </target>
- 
   <!-- Makes a tarball of the source.    -->
   <!-- Copies NOTICE.txt and LICENSE.txt from solr/ to the root level. -->
   <target name="package-src-tgz" depends="init-dist"


[13/43] lucene-solr:jira/http2: SOLR-12055: Enable async logging by default - rollback

Posted by da...@apache.org.
SOLR-12055: Enable async logging by default - rollback


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3b62f23f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3b62f23f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3b62f23f

Branch: refs/heads/jira/http2
Commit: 3b62f23f72ed826d363b81826be9caf0a2edbd1b
Parents: 8f49892
Author: Erick Erickson <Er...@gmail.com>
Authored: Fri Sep 7 22:51:50 2018 -0700
Committer: Erick Erickson <Er...@gmail.com>
Committed: Fri Sep 7 22:51:50 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  9 --
 solr/core/src/test-files/log4j2.xml             | 12 +--
 .../apache/solr/handler/RequestLoggingTest.java | 27 ++----
 .../org/apache/solr/logging/TestLogWatcher.java | 91 +++++++++-----------
 solr/server/resources/log4j2-console.xml        |  8 +-
 solr/server/resources/log4j2.xml                | 30 +++----
 6 files changed, 74 insertions(+), 103 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3b62f23f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7d30c0c..797acfc 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -100,11 +100,6 @@ Upgrade Notes
   WINDOWS USERS: This JIRA corrects a bug in the start scripts that put example logs under ...\server, solr.log is
   now under ...\example. (Erick Erickson)
 
-* SOLR-12055: Enable async logging by default. This change improves throughput for logging. This opens
-  up a small window where log messages could possibly be lost. If this is unacceptable, switching back to synchronous
-  logging can be done by changing the log4j2.xml files, no internal Solr code changed to make async logging the default.
-  (Erick Erickson)
-
 
 New Features
 ----------------------
@@ -322,12 +317,8 @@ Bug Fixes
 * SOLR-12704: Guard AddSchemaFieldsUpdateProcessorFactory against null field names and field values.
   (Steve Rowe, Varun Thacker)
 
-* SOLR-12728: RequestLoggingTest fails on occasion, not reproducible (Erick Erickson)
-
 * SOLR-12733: SolrMetricReporterTest failure (Erick Erickson, David Smiley)
 
-* SOLR-12732: TestLogWatcher failure on Jenkins (Erick Erickson)
-
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3b62f23f/solr/core/src/test-files/log4j2.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/log4j2.xml b/solr/core/src/test-files/log4j2.xml
index 5447843..7d0ebf7 100644
--- a/solr/core/src/test-files/log4j2.xml
+++ b/solr/core/src/test-files/log4j2.xml
@@ -27,13 +27,13 @@
     </Console>
   </Appenders>
   <Loggers>
-    <AsyncLogger name="org.apache.zookeeper" level="WARN"/>
-    <AsyncLogger name="org.apache.hadoop" level="WARN"/>
-    <AsyncLogger name="org.apache.directory" level="WARN"/>
-    <AsyncLogger name="org.apache.solr.hadoop" level="INFO"/>
+    <Logger name="org.apache.zookeeper" level="WARN"/>
+    <Logger name="org.apache.hadoop" level="WARN"/>
+    <Logger name="org.apache.directory" level="WARN"/>
+    <Logger name="org.apache.solr.hadoop" level="INFO"/>
 
-    <AsyncRoot level="INFO">
+    <Root level="INFO">
       <AppenderRef ref="STDERR"/>
-    </AsyncRoot>
+    </Root>
   </Loggers>
 </Configuration>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3b62f23f/solr/core/src/test/org/apache/solr/handler/RequestLoggingTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/RequestLoggingTest.java b/solr/core/src/test/org/apache/solr/handler/RequestLoggingTest.java
index ae08e9a..e203e80 100644
--- a/solr/core/src/test/org/apache/solr/handler/RequestLoggingTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/RequestLoggingTest.java
@@ -48,15 +48,17 @@ public class RequestLoggingTest extends SolrTestCaseJ4 {
   @Before
   public void setupAppender() {
     LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+    LoggerConfig config = ctx.getConfiguration().getLoggerConfig("RequestLoggingTest");
 
     writer = new StringWriter();
     appender = WriterAppender.createAppender(
       PatternLayout
         .newBuilder()
         .withPattern("%-5p [%t]: %m%n")
-        .build(), 
+        .build(),
         null, writer, "RequestLoggingTest", false, true);
     appender.start();
+
   }
 
   @Test
@@ -73,7 +75,7 @@ public class RequestLoggingTest extends SolrTestCaseJ4 {
 
   public void testLogBeforeExecute(Logger logger) {
     Level level = logger.getLevel();
-    
+
     LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
     LoggerConfig config = ctx.getConfiguration().getLoggerConfig(logger.getName());
     config.setLevel(Level.DEBUG);
@@ -82,23 +84,10 @@ public class RequestLoggingTest extends SolrTestCaseJ4 {
 
     try {
       assertQ(req("q", "*:*"));
-      Matcher matcher = null;
-      boolean foundDebugMsg = false;
-      String output = "";
-      for (int msgIdx = 0; msgIdx < 100; ++msgIdx) {
-        output = writer.toString();
-        matcher = Pattern.compile("DEBUG.*q=\\*:\\*.*").matcher(output);
-        if (matcher.find()) {
-          foundDebugMsg = true;
-          break;
-        }
-        try {
-          Thread.sleep(10);
-        } catch (InterruptedException ie) {
-          ;
-        }
-      }
-      assertTrue("Should have found debug-level message. Found " + output, foundDebugMsg);
+
+      String output = writer.toString();
+      Matcher matcher = Pattern.compile("DEBUG.*q=\\*:\\*.*").matcher(output);
+      assertTrue(matcher.find());
       final String group = matcher.group();
       final String msg = "Should not have post query information";
       assertFalse(msg, group.contains("hits"));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3b62f23f/solr/core/src/test/org/apache/solr/logging/TestLogWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/logging/TestLogWatcher.java b/solr/core/src/test/org/apache/solr/logging/TestLogWatcher.java
index a93a11b..6f7987f 100644
--- a/solr/core/src/test/org/apache/solr/logging/TestLogWatcher.java
+++ b/solr/core/src/test/org/apache/solr/logging/TestLogWatcher.java
@@ -16,7 +16,6 @@
  */
 package org.apache.solr.logging;
 
-import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.junit.Before;
@@ -27,63 +26,55 @@ import org.slf4j.LoggerFactory;
 import java.lang.invoke.MethodHandles;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-public class TestLogWatcher extends SolrTestCaseJ4 {
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestLogWatcher {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   private LogWatcherConfig config;
 
   @Before
-  public void before() {
-    config = new LogWatcherConfig(true, null, "INFO", 1);
+  public void setUp() {
+    config = new LogWatcherConfig(true, null, null, 50);
   }
 
-  // Create several log watchers and ensure that new messages go to the new watcher.
   @Test
   public void testLog4jWatcher() {
-    LogWatcher watcher;
-    int lim = random().nextInt(3) + 2;
-    for (int idx = 0; idx < lim; ++idx) {
-      String msg = "This is a test message: " + idx;
-      watcher = LogWatcher.newRegisteredLogWatcher(config, null);
-
-      // First ensure there's nothing in the new watcher.
-
-      // Every time you put a message in the queue, you wait for it to come out _before_ creating
-      // a new watcher so it should be fine.
-      if (looper(watcher, null) == false) {
-        fail("There should be no messages when a new watcher finally gets registered! In loop: " + idx);
-      }
-
-      // Now log a message and ensure that the new watcher sees it.
-      log.warn(msg);
-
-      // Loop to give the logger time to process the async message and notify the new watcher.
-      if (looper(watcher, msg) == false) {
-        fail("Should have found message " + msg + ". In loop: " + idx);
-      }
-    }
+    LogWatcher watcher = LogWatcher.newRegisteredLogWatcher(config, null);
+
+    assertEquals(watcher.getLastEvent(), -1);
+
+    log.warn("This is a test message");
+
+    assertTrue(watcher.getLastEvent() > -1);
+
+    SolrDocumentList events = watcher.getHistory(-1, new AtomicBoolean());
+    assertEquals(events.size(), 1);
+
+    SolrDocument event = events.get(0);
+    assertEquals(event.get("logger"), "org.apache.solr.logging.TestLogWatcher");
+    assertEquals(event.get("message"), "This is a test message");
+
   }
-  private boolean looper(LogWatcher watcher, String msg) {
-    // In local testing this loop usually succeeds 1-2 tries.
-    boolean success = false;
-    boolean testingNew = msg == null;
-    for (int msgIdx = 0; msgIdx < 100 && success == false; ++msgIdx) {
-      if (testingNew) { // check that there are no entries registered for the watcher
-        success = watcher.getLastEvent() == -1;
-      } else { // check that the expected message is there.
-        // Returns an empty (but non-null) list even if there are no messages yet.
-        SolrDocumentList events = watcher.getHistory(-1, new AtomicBoolean());
-        for (SolrDocument doc : events) {
-          if (doc.get("message").equals(msg)) {
-            success = true;
-          }
-        }
-      }
-      try {
-        Thread.sleep(10);
-      } catch (InterruptedException ie) {
-        ;
-      }
-    }
-    return success;
+
+  // It may seem weird to do the same thing twice, but this is valid: we need to test whether listeners are replaced....
+  @Test
+  public void testLog4jWatcherRepeat() {
+    LogWatcher watcher = LogWatcher.newRegisteredLogWatcher(config, null);
+
+    assertEquals(watcher.getLastEvent(), -1);
+
+    log.warn("This is a test message");
+
+    assertTrue(watcher.getLastEvent() > -1);
+
+    SolrDocumentList events = watcher.getHistory(-1, new AtomicBoolean());
+    assertEquals(events.size(), 1);
+
+    SolrDocument event = events.get(0);
+    assertEquals(event.get("logger"), "org.apache.solr.logging.TestLogWatcher");
+    assertEquals(event.get("message"), "This is a test message");
+
   }
+
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3b62f23f/solr/server/resources/log4j2-console.xml
----------------------------------------------------------------------
diff --git a/solr/server/resources/log4j2-console.xml b/solr/server/resources/log4j2-console.xml
index 698227b..f32f4c1 100644
--- a/solr/server/resources/log4j2-console.xml
+++ b/solr/server/resources/log4j2-console.xml
@@ -29,11 +29,11 @@
     </Console>
   </Appenders>
   <Loggers>
-    <AsyncLogger name="org.apache.zookeeper" level="WARN"/>
-    <AsyncLogger name="org.apache.hadoop" level="WARN"/>
+    <Logger name="org.apache.zookeeper" level="WARN"/>
+    <Logger name="org.apache.hadoop" level="WARN"/>
 
-    <AsyncRoot level="INFO">
+    <Root level="INFO">
       <AppenderRef ref="STDERR"/>
-    </AsyncRoot>
+    </Root>
   </Loggers>
 </Configuration>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3b62f23f/solr/server/resources/log4j2.xml
----------------------------------------------------------------------
diff --git a/solr/server/resources/log4j2.xml b/solr/server/resources/log4j2.xml
index d9c05d8..17bcf4c 100644
--- a/solr/server/resources/log4j2.xml
+++ b/solr/server/resources/log4j2.xml
@@ -27,8 +27,8 @@
       </PatternLayout>
     </Console>
 
-    <RollingRandomAccessFile
-        name="MainLogFile"
+    <RollingFile
+        name="RollingFile"
         fileName="${sys:solr.log.dir}/solr.log"
         filePattern="${sys:solr.log.dir}/solr.log.%i" >
       <PatternLayout>
@@ -41,10 +41,10 @@
         <SizeBasedTriggeringPolicy size="32 MB"/>
       </Policies>
       <DefaultRolloverStrategy max="10"/>
-    </RollingRandomAccessFile>
+    </RollingFile>
 
-    <RollingRandomAccessFile
-        name="SlowLogFile"
+    <RollingFile
+        name="SlowFile"
         fileName="${sys:solr.log.dir}/solr_slow_requests.log"
         filePattern="${sys:solr.log.dir}/solr_slow_requests.log.%i" >
       <PatternLayout>
@@ -57,20 +57,20 @@
         <SizeBasedTriggeringPolicy size="32 MB"/>
       </Policies>
       <DefaultRolloverStrategy max="10"/>
-    </RollingRandomAccessFile>
+    </RollingFile>
 
   </Appenders>
   <Loggers>
-    <AsyncLogger name="org.apache.hadoop" level="warn"/>
-    <AsyncLogger name="org.apache.solr.update.LoggingInfoStream" level="off"/>
-    <AsyncLogger name="org.apache.zookeeper" level="warn"/>
-    <AsyncLogger name="org.apache.solr.core.SolrCore.SlowRequest" level="info" additivity="false">
-      <AppenderRef ref="SlowLogFile"/>
-    </AsyncLogger>
+    <Logger name="org.apache.hadoop" level="warn"/>
+    <Logger name="org.apache.solr.update.LoggingInfoStream" level="off"/>
+    <Logger name="org.apache.zookeeper" level="warn"/>
+    <Logger name="org.apache.solr.core.SolrCore.SlowRequest" level="info" additivity="false">
+      <AppenderRef ref="SlowFile"/>
+    </Logger>
 
-    <AsyncRoot level="info">
-      <AppenderRef ref="MainLogFile"/>
+    <Root level="info">
+      <AppenderRef ref="RollingFile"/>
       <AppenderRef ref="STDOUT"/>
-    </AsyncRoot>
+    </Root>
   </Loggers>
 </Configuration>


[19/43] lucene-solr:jira/http2: Add version 7.5.1

Posted by da...@apache.org.
Add version 7.5.1


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2ffcb878
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2ffcb878
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2ffcb878

Branch: refs/heads/jira/http2
Commit: 2ffcb878b4b79a6205888653f6965807b38d4669
Parents: 4f0320c
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Sep 10 09:19:09 2018 +0200
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Sep 10 09:19:09 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                  |  3 +++
 .../src/java/org/apache/lucene/util/Version.java    |  7 +++++++
 solr/CHANGES.txt                                    | 16 ++++++++++++++++
 3 files changed, 26 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ffcb878/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 846554e..bcd9533 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -160,6 +160,9 @@ Optimizations
 * LUCENE-8448: Boolean queries now propagates the mininum score to their sub-scorers.
   (Jim Ferenczi, Adrien Grand)
 
+======================= Lucene 7.5.1 =======================
+(No Changes)
+
 ======================= Lucene 7.5.0 =======================
 
 API Changes:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ffcb878/lucene/core/src/java/org/apache/lucene/util/Version.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/Version.java b/lucene/core/src/java/org/apache/lucene/util/Version.java
index 80368da..242b4ae 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Version.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Version.java
@@ -97,6 +97,13 @@ public final class Version {
   public static final Version LUCENE_7_5_0 = new Version(7, 5, 0);
 
   /**
+   * Match settings and bugs in Lucene's 7.5.1 release.
+   * @deprecated Use latest
+   */
+  @Deprecated
+  public static final Version LUCENE_7_5_1 = new Version(7, 5, 1);
+
+  /**
    * Match settings and bugs in Lucene's 8.0.0 release.
    * <p>
    * Use this to get the latest &amp; greatest settings, bug

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ffcb878/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 797acfc..f5fca60 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -71,6 +71,22 @@ Other Changes
   java.time.DateTimeFormatter instead of Joda time (see upgrade notes).  "Lenient" is enabled.  Removed Joda Time dependency.
   (David Smiley, Bar Rotstein)
 
+==================  7.5.1 ==================
+
+Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
+
+Versions of Major Components
+---------------------
+Apache Tika 1.18
+Carrot2 3.16.0
+Velocity 1.7 and Velocity Tools 2.0
+Apache ZooKeeper 3.4.11
+Jetty 9.4.11.v20180605
+
+
+(No Changes)
+
+
 ==================  7.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


[36/43] lucene-solr:jira/http2: Ref Guide: fix typos; params -> parameters

Posted by da...@apache.org.
Ref Guide: fix typos; params -> parameters


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b32dcbbe
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b32dcbbe
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b32dcbbe

Branch: refs/heads/jira/http2
Commit: b32dcbbe42bd3360f6d1cfa65495f7007034d0a9
Parents: ad7f15d
Author: Cassandra Targett <ct...@apache.org>
Authored: Wed Sep 12 15:04:28 2018 -0500
Committer: Cassandra Targett <ct...@apache.org>
Committed: Wed Sep 12 15:04:28 2018 -0500

----------------------------------------------------------------------
 .../src/common-query-parameters.adoc            |  4 ++--
 .../src/initparams-in-solrconfig.adoc           |  2 +-
 solr/solr-ref-guide/src/other-parsers.adoc      |  6 ++---
 .../src/pagination-of-results.adoc              |  2 +-
 .../src/resource-and-plugin-loading.adoc        | 24 ++++++++++----------
 .../src/rule-based-replica-placement.adoc       |  2 +-
 solr/solr-ref-guide/src/spell-checking.adoc     |  2 +-
 .../transforming-and-indexing-custom-json.adoc  |  2 +-
 8 files changed, 22 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/common-query-parameters.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/common-query-parameters.adoc b/solr/solr-ref-guide/src/common-query-parameters.adoc
index 3f3564f..fe2956e 100644
--- a/solr/solr-ref-guide/src/common-query-parameters.adoc
+++ b/solr/solr-ref-guide/src/common-query-parameters.adoc
@@ -284,9 +284,9 @@ The `echoParams` parameter controls what information about request parameters is
 
 The `echoParams` parameter accepts the following values:
 
-* `explicit`: This is the default value. Only parameters included in the actual request, plus the `_` parameter (which is a 64-bit numeric timestamp) will be added to the params section of the response header.
+* `explicit`: This is the default value. Only parameters included in the actual request, plus the `_` parameter (which is a 64-bit numeric timestamp) will be added to the `params` section of the response header.
 * `all`: Include all request parameters that contributed to the query. This will include everything defined in the request handler definition found in `solrconfig.xml` as well as parameters included with the request, plus the `_` parameter. If a parameter is included in the request handler definition AND the request, it will appear multiple times in the response header.
-* `none`: Entirely removes the "params" section of the response header. No information about the request parameters will be available in the response.
+* `none`: Entirely removes the `params` section of the response header. No information about the request parameters will be available in the response.
 
 Here is an example of a JSON response where the echoParams parameter was not included, so the default of `explicit` is active. The request URL that created this response included three parameters - `q`, `wt`, and `indent`:
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/initparams-in-solrconfig.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/initparams-in-solrconfig.adoc b/solr/solr-ref-guide/src/initparams-in-solrconfig.adoc
index 7334021..c0b7fb3 100644
--- a/solr/solr-ref-guide/src/initparams-in-solrconfig.adoc
+++ b/solr/solr-ref-guide/src/initparams-in-solrconfig.adoc
@@ -46,7 +46,7 @@ The syntax and semantics are similar to that of a `<requestHandler>`. The follow
 A comma-separated list of paths which will use the parameters. Wildcards can be used in paths to define nested paths, as described below.
 
 `name`::
-The name of this set of parameters. The name can be used directly in a requestHandler definition if a path is not explicitly named. If you give your `<initParams>` a name, you can refer to the params in a `<requestHandler>` that is not defined as a path.
+The name of this set of parameters. The name can be used directly in a requestHandler definition if a path is not explicitly named. If you give your `<initParams>` a name, you can refer to the parameters in a `<requestHandler>` that is not defined as a path.
 +
 For example, if an `<initParams>` section has the name "myParams", you can call the name when defining your request handler:
 +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/other-parsers.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/other-parsers.adoc b/solr/solr-ref-guide/src/other-parsers.adoc
index bcd28eb..f22dcdc 100644
--- a/solr/solr-ref-guide/src/other-parsers.adoc
+++ b/solr/solr-ref-guide/src/other-parsers.adoc
@@ -498,11 +498,11 @@ http://localhost:8983/solr/my_graph/query?fl=id&q={!graph+from=in_edge+to=out_ed
 
 === Simplified Models
 
-The Document & Field modeling used in the above examples enumerated all of the outgoing and income edges for each node explicitly, to help demonstrate exactly how the "from" and "to" params work, and to give you an idea of what is possible. With multiple sets of fields like these for identifying incoming and outgoing edges, it's possible to model many independent Directed Graphs that contain some or all of the documents in your collection.
+The Document & Field modeling used in the above examples enumerated all of the outgoing and incoming edges for each node explicitly, to help demonstrate exactly how the "from" and "to" parameters work, and to give you an idea of what is possible. With multiple sets of fields like these for identifying incoming and outgoing edges, it's possible to model many independent Directed Graphs that contain some or all of the documents in your collection.
 
 But in many cases it can also be possible to drastically simplify the model used.
 
-For example, the same graph shown in the diagram above can be modelled by Solr Documents that represent each node and know only the ids of the nodes they link to, without knowing anything about the incoming links:
+For example, the same graph shown in the diagram above can be modeled by Solr Documents that represent each node and know only the ids of the nodes they link to, without knowing anything about the incoming links:
 
 [source,bash]
 ----
@@ -927,7 +927,7 @@ In the examples below, the result of each query is "XXX":
 {!switch case=XXX case.bar=zzz case.yak=qqq}
 ----
 
+A practical usage of this parser is in specifying `appends` filter query (`fq`) parameters in the configuration of a SearchHandler, to provide a fixed set of filter options for clients using custom parameter names.
+A practical usage of this parser, is in specifying `appends` filter query (`fq`) parameters in the configuration of a SearchHandler, to provide a fixed set of filter options for clients using custom parameter names.
 
 Using the example configuration below, clients can optionally specify the custom parameters `in_stock` and `shipping` to override the default filtering behavior, but are limited to the specific set of legal values (shipping=any|free, in_stock=yes|no|all).
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/pagination-of-results.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/pagination-of-results.adoc b/solr/solr-ref-guide/src/pagination-of-results.adoc
index f3deac7..f0b0fb6 100644
--- a/solr/solr-ref-guide/src/pagination-of-results.adoc
+++ b/solr/solr-ref-guide/src/pagination-of-results.adoc
@@ -98,7 +98,7 @@ There are a few important constraints to be aware of when using `cursorMark` par
 . `cursorMark` and `start` are mutually exclusive parameters.
 * Your requests must either not include a `start` parameter, or it must be specified with a value of "```0```".
 . `sort` clauses must include the uniqueKey field (either `asc` or `desc`).
-* If `id` is your uniqueKey field, then sort params like `id asc` and `name asc, id desc` would both work fine, but `name asc` by itself would not
+* If `id` is your uniqueKey field, then sort parameters like `id asc` and `name asc, id desc` would both work fine, but `name asc` by itself would not
 . Sorts including <<working-with-dates.adoc#working-with-dates,Date Math>> based functions that involve calculations relative to `NOW` will cause confusing results, since every document will get a new sort value on every subsequent request. This can easily result in cursors that never end, and constantly return the same documents over and over – even if the documents are never updated.
 +
 In this situation, choose & re-use a fixed value for the <<working-with-dates.adoc#now,`NOW` request param>> in all of your cursor requests.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/resource-and-plugin-loading.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/resource-and-plugin-loading.adoc b/solr/solr-ref-guide/src/resource-and-plugin-loading.adoc
index 60cd60f..04dc840 100644
--- a/solr/solr-ref-guide/src/resource-and-plugin-loading.adoc
+++ b/solr/solr-ref-guide/src/resource-and-plugin-loading.adoc
@@ -16,8 +16,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
-Solr components can be configured using *resources*: data stored in external files that may be referred to in a location-independent fashion. Examples include: files needed by schema components, e.g. a stopword list for <<filter-descriptions.adoc#stop-filter,Stop Filter>>; and machine-learned models for <<learning-to-rank.adoc#learning-to-rank,Learning to Rank>>.
-  
+Solr components can be configured using *resources*: data stored in external files that may be referred to in a location-independent fashion. Examples include: files needed by schema components, e.g., a stopword list for <<filter-descriptions.adoc#stop-filter,Stop Filter>>; and machine-learned models for <<learning-to-rank.adoc#learning-to-rank,Learning to Rank>>.
+
 Solr *plugins*, which can be configured in `solrconfig.xml`, are Java classes that are normally packaged in `.jar` files along with supporting classes and data. Solr ships with a number of built-in plugins, and can also be configured to use custom plugins.  Example plugins are the <<uploading-structured-data-store-data-with-the-data-import-handler.adoc#uploading-structured-data-store-data-with-the-data-import-handler,Data Import Handler>> and custom search components.
 
 Resources and plugins may be stored:
@@ -25,29 +25,29 @@ Resources and plugins may be stored:
 * in ZooKeeper under a collection's configset node (SolrCloud only);
 * on a filesystem accessible to Solr nodes; or
 * in Solr's <<blob-store-api.adoc#blob-store-api,Blob Store>> (SolrCloud only).
-  
-NOTE: Schema components may not be stored as plugins in the Blob Store, and cannot access resources stored in the Blob Store.  
 
-== Resource and Plugin Loading Sequence 
+NOTE: Schema components may not be stored as plugins in the Blob Store, and cannot access resources stored in the Blob Store.
+
+== Resource and Plugin Loading Sequence
 
 Under SolrCloud, resources and plugins to be loaded are first looked up in ZooKeeper under the collection's configset znode.  If the resource or plugin is not found there, Solr will fall back to loading <<Resources and Plugins on the Filesystem,from the filesystem>>.
 
-Note that by default, Solr will not attempt to load resources and plugins from the Blob Store.  To enable this, see the section <<blob-store-api.adoc#use-a-blob-in-a-handler-or-component,Use a Blob in a Handler or Component>>.  When loading from the Blob Store is enabled for a component, lookups occur only in the Blob Store, and never in ZooKeeper or on the filesystem.  
+Note that by default, Solr will not attempt to load resources and plugins from the Blob Store.  To enable this, see the section <<blob-store-api.adoc#use-a-blob-in-a-handler-or-component,Use a Blob in a Handler or Component>>.  When loading from the Blob Store is enabled for a component, lookups occur only in the Blob Store, and never in ZooKeeper or on the filesystem.
 
 == Resources and Plugins in ConfigSets on ZooKeeper
 
 Resources and plugins may be uploaded to ZooKeeper as part of a configset, either via the <<configsets-api.adoc#configsets-api,Configsets API>> or <<solr-control-script-reference.adoc#upload-a-configuration-set,`bin/solr zk upload`>>.
 
-To upload a plugin or resource to a configset already stored on ZooKeeper, you can use <<solr-control-script-reference.adoc#copy-between-local-files-and-zookeeper-znodes,`bin/solr zk cp`>>.   
+To upload a plugin or resource to a configset already stored on ZooKeeper, you can use <<solr-control-script-reference.adoc#copy-between-local-files-and-zookeeper-znodes,`bin/solr zk cp`>>.
 
 CAUTION: By default, ZooKeeper's file size limit is 1MB. If your files are larger than this, you'll need to either <<setting-up-an-external-zookeeper-ensemble.adoc#increasing-the-file-size-limit,increase the ZooKeeper file size limit>> or store them instead <<Resources and Plugins on the Filesystem,on the filesystem>>.
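For instance, a hedged sketch of pushing a single resource into an existing configset with the `bin/solr zk cp` command mentioned above (the file, configset name, and ZooKeeper address are assumptions):

[source,bash]
----
# copy a local stopwords file into the 'myconfig' configset on ZooKeeper
bin/solr zk cp file:/tmp/stopwords.txt zk:/configs/myconfig/stopwords.txt -z localhost:9983
----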
-      
-== Resources and Plugins on the Filesystem 
+
+== Resources and Plugins on the Filesystem
 
 Under standalone Solr, when looking up a plugin or resource to be loaded, Solr's resource loader will first look under the `<instanceDir>/conf/` directory.  If the plugin or resource is not found, the configured plugin and resource file paths are searched - see the section <<Lib Directives in SolrConfig>> below.
 
 On core load, Solr's resource loader constructs a list of paths (subdirectories and jars), first under <<solr_home-lib,`solr_home/lib`>>, and then under directories pointed to by <<Lib Directives in SolrConfig,`<lib/>` directives in SolrConfig>>.
-  
+
 When looking up a resource or plugin to be loaded, the paths on the list are searched in the order they were added.
 
 NOTE: Under SolrCloud, each node hosting a collection replica will need its own copy of plugins and resources to be loaded.
@@ -58,7 +58,7 @@ WARNING: Resource files *will not be loaded* if they are located directly under
 
 === solr_home/lib
 
-Each Solr node can have a directory named `lib/` under the <<taking-solr-to-production.adoc#solr-home-directory,Solr home directory>>.  In order to use this directory to host resources or plugins, it must first be manually created. 
+Each Solr node can have a directory named `lib/` under the <<taking-solr-to-production.adoc#solr-home-directory,Solr home directory>>.  In order to use this directory to host resources or plugins, it must first be manually created.
 
 === Lib Directives in SolrConfig
 
@@ -68,7 +68,7 @@ Loading occurs in the order `<lib/>` directives appear in `solrconfig.xml`. If t
 
 A regular expression supplied in the `<lib/>` element's `regex` attribute value can be used to restrict which subdirectories and/or jar files are added to the Solr resource loader's list of search locations.  If no regular expression is given, all direct subdirectory and jar children are included in the resource path list.  All directories are resolved as relative to the Solr core's `instanceDir`.
 
-From an example SolrConfig: 
+From an example SolrConfig:
 
 [source,xml]
 ----
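<!-- A hedged sketch (the dir value is an assumption): add all jar files from a
     contrib directory, restricted by the regex, to the resource loader's paths -->
<lib dir="../../../contrib/extraction/lib" regex=".*\.jar" />
----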

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/rule-based-replica-placement.adoc b/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
index 66b1d45..5dbd170 100644
--- a/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
+++ b/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
@@ -167,7 +167,7 @@ host:!192.45.67.3
 
 == Defining Rules
 
-Rules are specified per collection during collection creation as request parameters. It is possible to specify multiple ‘rule’ and ‘snitch’ params as in this example:
+Rules are specified per collection during collection creation as request parameters. It is possible to specify multiple ‘rule’ and ‘snitch’ parameters as in this example:
 
 [source,text]
 ----

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/spell-checking.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/spell-checking.adoc b/solr/solr-ref-guide/src/spell-checking.adoc
index 7911b84..c883ed9 100644
--- a/solr/solr-ref-guide/src/spell-checking.adoc
+++ b/solr/solr-ref-guide/src/spell-checking.adoc
@@ -233,7 +233,7 @@ This parameter specifies the maximum number of documents that should be collecte
 The default value for this parameter is `0`, but when `spellcheck.collateExtendedResults` is false, the optimization is always used as if `1` had been specified.
 
 `spellcheck.collateParam.*` Prefix::
-This parameter prefix can be used to specify any additional parameters that you wish to the Spellchecker to use when internally validating collation queries. For example, even if your regular search results allow for loose matching of one or more query terms via parameters like `q.op=OR` and `mm=20%` you can specify override params such as `spellcheck.collateParam.q.op=AND&spellcheck.collateParam.mm=100%` to require that only collations consisting of words that are all found in at least one document may be returned.
+This parameter prefix can be used to specify any additional parameters that you wish the Spellchecker to use when internally validating collation queries. For example, even if your regular search results allow for loose matching of one or more query terms via parameters like `q.op=OR` and `mm=20%`, you can specify override parameters such as `spellcheck.collateParam.q.op=AND&spellcheck.collateParam.mm=100%` to require that only collations consisting of words that are all found in at least one document may be returned.
 
 `spellcheck.dictionary`::
 This parameter causes Solr to use the dictionary named in the parameter's argument. The default setting is `default`. This parameter can be used to invoke a specific spellchecker on a per request basis.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b32dcbbe/solr/solr-ref-guide/src/transforming-and-indexing-custom-json.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/transforming-and-indexing-custom-json.adoc b/solr/solr-ref-guide/src/transforming-and-indexing-custom-json.adoc
index 36f64e1..7f7e58b 100644
--- a/solr/solr-ref-guide/src/transforming-and-indexing-custom-json.adoc
+++ b/solr/solr-ref-guide/src/transforming-and-indexing-custom-json.adoc
@@ -903,7 +903,7 @@ It is possible to send any JSON to the `/update/json/docs` endpoint and the defa
 </initParams>
 ----
 
-So, if no params are passed, the entire JSON file would get indexed to the `\_src_` field and all the values in the input JSON would go to a field named `text`. If there is a value for the uniqueKey it is stored and if no value could be obtained from the input JSON, a UUID is created and used as the uniqueKey field value.
+So, if no parameters are passed, the entire JSON file would get indexed to the `\_src_` field and all the values in the input JSON would go to a field named `text`. If there is a value for the uniqueKey, it is stored; if no value could be obtained from the input JSON, a UUID is created and used as the uniqueKey field value.
 
 Alternately, use the Request Parameters feature to set these parameters, as shown earlier in the section <<Reusing Parameters in Multiple Requests>>.
 


[18/43] lucene-solr:jira/http2: SOLR-12738: Incorrect Suggestions in autoscaling framework and refactoring

Posted by da...@apache.org.
SOLR-12738: Incorrect Suggestions in autoscaling framework and refactoring


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4f0320c3
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4f0320c3
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4f0320c3

Branch: refs/heads/jira/http2
Commit: 4f0320c3f3c16c162e576cf19ffa27055e866eaa
Parents: 70f0019
Author: Noble Paul <no...@apache.org>
Authored: Mon Sep 10 12:24:24 2018 +1000
Committer: Noble Paul <no...@apache.org>
Committed: Mon Sep 10 12:24:24 2018 +1000

----------------------------------------------------------------------
 .../client/solrj/cloud/autoscaling/Clause.java  | 252 ++++++++++++-------
 .../solrj/cloud/autoscaling/Condition.java      |  18 +-
 .../solrj/cloud/autoscaling/CoresVariable.java  |   5 +-
 .../cloud/autoscaling/FreeDiskVariable.java     |  40 ++-
 .../client/solrj/cloud/autoscaling/Operand.java |   1 +
 .../solrj/cloud/autoscaling/ReplicaCount.java   |  48 +++-
 .../client/solrj/cloud/autoscaling/Row.java     |  35 ++-
 .../solrj/cloud/autoscaling/Suggestion.java     |  43 +++-
 .../solrj/cloud/autoscaling/Variable.java       |  18 +-
 .../solrj/cloud/autoscaling/VariableBase.java   |  16 +-
 .../solrj/cloud/autoscaling/Violation.java      |  31 +--
 .../autoscaling/WithCollectionVariable.java     |   3 +-
 .../solrj/cloud/autoscaling/TestPolicy.java     |  25 +-
 .../solrj/cloud/autoscaling/TestPolicy2.java    | 193 +++++++++++++-
 14 files changed, 537 insertions(+), 191 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
index 3a57a1f..820f335 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
@@ -20,7 +20,7 @@ package org.apache.solr.client.solrj.cloud.autoscaling;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashMap;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
@@ -46,6 +46,7 @@ import static org.apache.solr.client.solrj.cloud.autoscaling.Operand.RANGE_EQUAL
 import static org.apache.solr.client.solrj.cloud.autoscaling.Operand.WILDCARD;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.ANY;
 import static org.apache.solr.common.params.CoreAdminParams.COLLECTION;
+import static org.apache.solr.common.params.CoreAdminParams.NODE;
 import static org.apache.solr.common.params.CoreAdminParams.REPLICA;
 import static org.apache.solr.common.params.CoreAdminParams.SHARD;
 import static org.apache.solr.common.util.StrUtils.formatString;
@@ -168,7 +169,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
     return globalTag;
   }
 
-  private Condition evaluateValue(Condition condition, Function<Condition, Object> computedValueEvaluator) {
+  Condition evaluateValue(Condition condition, Function<Condition, Object> computedValueEvaluator) {
     if (condition == null) return null;
     if (condition.computedType == null) return condition;
     Object val = computedValueEvaluator.apply(condition);
@@ -358,107 +359,184 @@ public class Clause implements MapWriter, Comparable<Clause> {
     return operand;
   }
 
-  public List<Violation> test(Policy.Session session, double[] deviations) {
-    ComputedValueEvaluator computedValueEvaluator = new ComputedValueEvaluator(session);
-    Violation.Ctx ctx = new Violation.Ctx(this, session.matrix, computedValueEvaluator);
-    if (isPerCollectiontag()) {
-      Map<String, Map<String, Map<String, ReplicaCount>>> replicaCounts = computeReplicaCounts(session.matrix, computedValueEvaluator);
-      for (Map.Entry<String, Map<String, Map<String, ReplicaCount>>> e : replicaCounts.entrySet()) {
-        computedValueEvaluator.collName = e.getKey();
-        if (!collection.isPass(computedValueEvaluator.collName)) continue;
-        for (Map.Entry<String, Map<String, ReplicaCount>> shardVsCount : e.getValue().entrySet()) {
-          computedValueEvaluator.shardName = shardVsCount.getKey();
-          if (!shard.isPass(computedValueEvaluator.shardName)) continue;
-          for (Map.Entry<String, ReplicaCount> counts : shardVsCount.getValue().entrySet()) {
-            if (tag.varType.meta.isNodeSpecificVal()) computedValueEvaluator.node = counts.getKey();
-            SealedClause sealedClause = getSealedClause(computedValueEvaluator);
-            ReplicaCount replicas = counts.getValue();
-            if (!sealedClause.replica.isPass(replicas)) {
-              Violation violation = new Violation(sealedClause,
-                  computedValueEvaluator.collName,
-                  computedValueEvaluator.shardName,
-                  tag.varType.meta.isNodeSpecificVal() ? computedValueEvaluator.node : null,
-                  counts.getValue(),
-                  sealedClause.getReplica().delta(replicas),
-                  tag.varType.meta.isNodeSpecificVal() ? null : counts.getKey());
-              tag.varType.addViolatingReplicas(ctx.reset(counts.getKey(), replicas, violation));
-            } else {
-              if (deviations != null && sealedClause.replica.op == RANGE_EQUAL) {
-                Number actualCount = replicas.getVal(type);
-                Double realDelta = ((RangeVal) sealedClause.replica.val).realDelta(actualCount.doubleValue());
-                realDelta = this.isReplicaZero() ? -1 * realDelta : realDelta;
-                deviations[0] += Math.abs(realDelta);
-              }
-            }
-          }
+  List<Violation> testGroupNodes(Policy.Session session, double[] deviations) {
+    //e.g:  {replica:'#EQUAL', shard:'#EACH',  sysprop.zone:'#EACH'}
+    ComputedValueEvaluator eval = new ComputedValueEvaluator(session);
+    eval.collName = (String) collection.getValue();
+    Violation.Ctx ctx = new Violation.Ctx(this, session.matrix, eval);
+
+    Set tags = new HashSet();
+    for (Row row : session.matrix) {
+      eval.node = row.node;
+      Condition tag = this.tag;
+      if (tag.computedType != null) tag = evaluateValue(tag, eval);
+      Object val = row.getVal(tag.name);
+      if (val != null && tag.isPass(val)) {
+        if (tag.op == LESS_THAN || tag.op == GREATER_THAN) {
+          tags.add(this.tag);
+        } else {
+          tags.add(val);
         }
       }
-    } else {
-      for (Row r : session.matrix) {
-        computedValueEvaluator.node = r.node;
-        SealedClause sealedClause = getSealedClause(computedValueEvaluator);
-        if (!sealedClause.getGlobalTag().isPass(r)) {
-          sealedClause.getGlobalTag().varType.addViolatingReplicas(ctx.reset(null, null,
-              new Violation(sealedClause, null, null, r.node, r.getVal(sealedClause.globalTag.name),
-                  sealedClause.globalTag.delta(r.getVal(globalTag.name)), null)));
+    }
+    if (tags.isEmpty()) return Collections.emptyList();
+
+    Set<String> shards = getShardNames(session, eval);
+
+    for (String s : shards) {
+      final ReplicaCount replicaCount = new ReplicaCount();
+      eval.shardName = s;
+
+      for (Object t : tags) {
+        replicaCount.reset();
+        for (Row row : session.matrix) {
+          eval.node = row.node;
+          if (t instanceof Condition) {
+            Condition tag = (Condition) t;
+            if (tag.computedType != null) tag = evaluateValue(tag, eval);
+            if (!tag.isPass(row)) continue;
+          } else {
+            if (!t.equals(row.getVal(tag.name))) continue;
+          }
+          addReplicaCountsForNode(eval, replicaCount, row);
+        }
+
+        SealedClause sealedClause = this.getSealedClause(eval);
+        if (!sealedClause.replica.isPass(replicaCount)) {
+          ReplicaCount replicaCountCopy = replicaCount.copy();
+          Violation violation = new Violation(sealedClause,
+              eval.collName,
+              eval.shardName,
+              null,
+              replicaCountCopy,
+              sealedClause.getReplica().replicaCountDelta(replicaCountCopy),
+              t);
+          ctx.resetAndAddViolation(t, replicaCountCopy, violation);
+          sealedClause.addViolatingReplicas(sealedClause.tag, eval, ctx, tag.name, t, violation, session);
+        } else {
+          computeDeviation(deviations, replicaCount, sealedClause);
         }
       }
     }
     return ctx.allViolations;
+  }
 
+  private void computeDeviation(double[] deviations, ReplicaCount replicaCount, SealedClause sealedClause) {
+    if (deviations != null && sealedClause.replica.op == RANGE_EQUAL) {
+      Number actualCount = replicaCount.getVal(type);
+      Double realDelta = ((RangeVal) sealedClause.replica.val).realDelta(actualCount.doubleValue());
+      realDelta = this.isReplicaZero() ? -1 * realDelta : realDelta;
+      deviations[0] += Math.abs(realDelta);
+    }
   }
 
-  private Map<String, Map<String, Map<String, ReplicaCount>>> computeReplicaCounts(List<Row> allRows,
-                                                                                   ComputedValueEvaluator computedValueEvaluator) {
-    Map<String, Map<String, Map<String, ReplicaCount>>> collVsShardVsTagVsCount = new HashMap<>();
-    for (Row row : allRows) {
-      computedValueEvaluator.node = row.node;
-      for (Map.Entry<String, Map<String, List<ReplicaInfo>>> colls : row.collectionVsShardVsReplicas.entrySet()) {
-        String collectionName = colls.getKey();
-        if (!collection.isPass(collectionName)) continue;
-        Map<String, Map<String, ReplicaCount>> collMap = collVsShardVsTagVsCount.computeIfAbsent(collectionName, s -> new HashMap<>());
-        for (Map.Entry<String, List<ReplicaInfo>> shards : colls.getValue().entrySet()) {
-          String shardName = shards.getKey();
-          if (ANY.equals(shard.val)) shardName = ANY;
-          if (!shard.isPass(shardName)) break;
-          Map<String, ReplicaCount> tagVsCount = collMap.computeIfAbsent(shardName, s -> new HashMap<>());
-          Object tagVal = row.getVal(tag.name);
-          computedValueEvaluator.collName = collectionName;
-          computedValueEvaluator.shardName = shardName;
-          SealedClause sealedClause = getSealedClause(computedValueEvaluator);
-          Condition t = sealedClause.getTag();
-          if (t.varType.meta.isNodeSpecificVal()) {
-            boolean pass = t.getOperand().match(t.val, tagVal) == TestStatus.PASS;
-            tagVsCount.computeIfAbsent(row.node, s -> new ReplicaCount());
-            if(pass) {
-              tagVsCount.get(row.node).increment(shards.getValue());
-            }
-          } else {
-            tagVsCount.computeIfAbsent(String.valueOf(t.getValue()), s -> new ReplicaCount());
-            boolean pass = sealedClause.getTag().isPass(tagVal);
-            if(!pass && !isReplicaZero()) continue;
-            tagVsCount.computeIfAbsent(pass ? String.valueOf(tagVal) : "", s -> new ReplicaCount());
-            if (pass) {
-              tagVsCount.get(String.valueOf(tagVal)).increment(shards.getValue());
-            }
-          }
+  void addViolatingReplicas(Condition tag,
+                            ComputedValueEvaluator eval,
+                            Violation.Ctx ctx, String tagName, Object tagVal,
+                            Violation violation,
+                            Policy.Session session) {
+    if (tag.varType.addViolatingReplicas(ctx)) return;
+    for (Row row : session.matrix) {
+      if (tagVal.equals(row.getVal(tagName))) {
+        row.forEachReplica(eval.collName, ri -> {
+          if (Policy.ANY.equals(eval.shardName)
+              || eval.shardName.equals(ri.getShard()))
+            violation.addReplica(new Violation.ReplicaInfoAndErr(ri).withDelta(tag.delta(row.getVal(tag.name))));
+        });
+      }
+    }
+
+  }
+
+  private void addReplicaCountsForNode(ComputedValueEvaluator computedValueEvaluator, ReplicaCount replicaCount, Row node) {
+    node.forEachReplica((String) collection.getValue(), ri -> {
+      if (Policy.ANY.equals(computedValueEvaluator.shardName)
+          || computedValueEvaluator.shardName.equals(ri.getShard()))
+        replicaCount.increment(ri);
+    });
+  }
+
+  List<Violation> testPerNode(Policy.Session session, double[] deviations) {
+    ComputedValueEvaluator eval = new ComputedValueEvaluator(session);
+    eval.collName = (String) collection.getValue();
+    Violation.Ctx ctx = new Violation.Ctx(this, session.matrix, eval);
+    Set<String> shards = getShardNames(session, eval);
+    for (String s : shards) {
+      final ReplicaCount replicaCount = new ReplicaCount();
+      eval.shardName = s;
+      for (Row row : session.matrix) {
+        replicaCount.reset();
+        eval.node = row.node;
+        Condition tag = this.tag;
+        if (tag.computedType != null) {
+          tag = evaluateValue(tag, eval);
+        }
+        if (!tag.isPass(row)) continue;
+        addReplicaCountsForNode(eval, replicaCount, row);
+        SealedClause sealedClause = this.getSealedClause(eval);
+        if (!sealedClause.replica.isPass(replicaCount)) {
+          ReplicaCount replicaCountCopy = replicaCount.copy();
+          Violation violation = new Violation(sealedClause,
+              eval.collName,
+              eval.shardName,
+              eval.node,
+              replicaCountCopy,
+              sealedClause.getReplica().replicaCountDelta(replicaCountCopy),
+              eval.node);
+          ctx.resetAndAddViolation(row.node, replicaCountCopy, violation);
+          sealedClause.addViolatingReplicas(sealedClause.tag, eval, ctx, NODE, row.node, violation, session);
+        } else {
+          computeDeviation(deviations, replicaCount, sealedClause);
         }
       }
     }
+    return ctx.allViolations;
+  }
+
+  private Set<String> getShardNames(Policy.Session session,
+                                    ComputedValueEvaluator eval) {
+    Set<String> shards = new HashSet<>();
+    if (isShardAbsent()) {
+      shards.add(Policy.ANY); //consider the entire collection is a single shard
+    } else {
+      for (Row row : session.matrix) {
+        row.forEachShard(eval.collName, (shard, r) -> {
+          if (this.shard.isPass(shard)) shards.add(shard); // add relevant shards
+        });
+      }
+    }
+    return shards;
+  }
+
+  boolean isShardAbsent() {
+    return Policy.ANY.equals(shard.val);
+  }
+
+  public List<Violation> test(Policy.Session session, double[] deviations) {
+    if (isPerCollectiontag()) {
+      return tag.varType == Type.NODE ||
+          (tag.varType.meta.isNodeSpecificVal() && replica.computedType == null) ?
+          testPerNode(session, deviations) :
+          testGroupNodes(session, deviations);
+    } else {
+      ComputedValueEvaluator computedValueEvaluator = new ComputedValueEvaluator(session);
+      Violation.Ctx ctx = new Violation.Ctx(this, session.matrix, computedValueEvaluator);
+      for (Row r : session.matrix) {
+        computedValueEvaluator.node = r.node;
+        SealedClause sealedClause = getSealedClause(computedValueEvaluator);
+        if (!sealedClause.getGlobalTag().isPass(r)) {
+          ctx.resetAndAddViolation(r.node, null, new Violation(sealedClause, null, null, r.node, r.getVal(sealedClause.globalTag.name),
+              sealedClause.globalTag.delta(r.getVal(globalTag.name)), r.node));
+          addViolatingReplicas(sealedClause.globalTag, computedValueEvaluator, ctx, Type.CORES.tagName, r.node, ctx.currentViolation, session);
+
+        }
+      }
+      return ctx.allViolations;
 
-    if (this.getTag().op != LESS_THAN && this.getTag().varType == Type.NODE) {
-      collVsShardVsTagVsCount.forEach((coll, shardVsNodeVsCount) ->
-          shardVsNodeVsCount.forEach((shard, nodeVsCount) -> {
-            for (Row row : allRows) {
-              if (!nodeVsCount.containsKey(row.node)) {
-                nodeVsCount.put(row.node, new ReplicaCount());
-              }
-            }
-          }));
     }
-    return collVsShardVsTagVsCount;
   }
 
+
   public boolean isMatch(ReplicaInfo r, String collection, String shard) {
     if (type != null && r.getType() != type) return false;
     if (r.getCollection().equals(collection)) {
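A hedged reading of the new `test()` dispatch: clauses whose tag is the node itself (or another node-specific value paired with a plain, non-computed `replica` condition) go through `testPerNode`, while clauses that group nodes by a shared tag value go through `testGroupNodes`. Two illustrative clauses (the first is the example quoted in the `testGroupNodes` comment; the second is an assumed shape of a common per-node rule):

[source,text]
----
{"replica": "#EQUAL", "shard": "#EACH", "sysprop.zone": "#EACH"}
    -> testGroupNodes (one ReplicaCount per sysprop.zone value, per shard)

{"replica": "<2", "shard": "#EACH", "node": "#ANY"}
    -> testPerNode (one ReplicaCount per node, per shard)
----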

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Condition.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Condition.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Condition.java
index 3a58804..5b60ef0 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Condition.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Condition.java
@@ -85,8 +85,12 @@ public class Condition implements MapWriter {
     return false;
   }
 
-  public Double delta(Object val) {
+  public Double replicaCountDelta(Object val) {
     if (val instanceof ReplicaCount) val = ((ReplicaCount) val).getVal(getClause().type);
+    return op.delta(this.val, val);
+  }
+
+  public Double delta(Object val) {
     if (this.val instanceof String) {
       if (op == LESS_THAN || op == GREATER_THAN) {
         return op
@@ -96,15 +100,9 @@ public class Condition implements MapWriter {
         return 0d;
       }
     } else {
-      if (this == getClause().getReplica()) {
-        Double delta = op.delta(this.val, val);
-        return getClause().isReplicaZero() ? -1 * delta : delta;
-      } else {
-        return op
-            .opposite(getClause().isReplicaZero() && this == getClause().getTag())
-            .delta(this.val, val);
-      }
-
+      return op
+          .opposite(getClause().isReplicaZero() && this == getClause().getTag())
+          .delta(this.val, val);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/CoresVariable.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/CoresVariable.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/CoresVariable.java
index 3344626..f8717b1 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/CoresVariable.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/CoresVariable.java
@@ -33,8 +33,7 @@ public class CoresVariable extends VariableBase {
     return VariableBase.getOperandAdjustedValue(super.validate(name, val, isRuleVal), val);
   }
 
-  @Override
-  public void addViolatingReplicas(Violation.Ctx ctx) {
+  public boolean addViolatingReplicas(Violation.Ctx ctx) {
     for (Row row : ctx.allRows) {
       if (row.node.equals(ctx.currentViolation.node)) {
         row.forEachReplica(replicaInfo -> ctx.currentViolation
@@ -42,7 +41,7 @@ public class CoresVariable extends VariableBase {
                 .withDelta(ctx.currentViolation.replicaCountDelta)));
       }
     }
-
+    return true;
 
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
index 600a708..600695a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
@@ -20,12 +20,16 @@ package org.apache.solr.client.solrj.cloud.autoscaling;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 
+import org.apache.solr.client.solrj.cloud.autoscaling.Suggester.Hint;
 import org.apache.solr.common.cloud.rule.ImplicitSnitch;
 import org.apache.solr.common.util.Pair;
 
+import static org.apache.solr.client.solrj.cloud.autoscaling.Suggestion.suggestNegativeViolations;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORE_IDX;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.TOTALDISK;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
@@ -68,14 +72,12 @@ public class FreeDiskVariable extends VariableBase {
   @Override
   public void getSuggestions(Suggestion.Ctx ctx) {
     if (ctx.violation == null) return;
-    if (ctx.violation.replicaCountDelta < 0 && !ctx.violation.getViolatingReplicas().isEmpty()) {
-
-      Comparator<Row> rowComparator = Comparator.comparing(r -> ((Double) r.getVal(ImplicitSnitch.DISK, 0d)));
+    if (ctx.violation.replicaCountDelta > 0) {
       List<Row> matchingNodes = ctx.session.matrix.stream().filter(
           row -> ctx.violation.getViolatingReplicas()
               .stream()
               .anyMatch(p -> row.node.equals(p.replicaInfo.getNode())))
-          .sorted(rowComparator)
+          .sorted(Comparator.comparing(r -> ((Double) r.getVal(ImplicitSnitch.DISK, 0d))))
           .collect(Collectors.toList());
 
 
@@ -94,16 +96,42 @@ public class FreeDiskVariable extends VariableBase {
           if (currentDelta < 1) break;
           if (replica.getVariables().get(CORE_IDX.tagName) == null) continue;
           Suggester suggester = ctx.session.getSuggester(MOVEREPLICA)
-              .hint(Suggester.Hint.COLL_SHARD, new Pair<>(replica.getCollection(), replica.getShard()))
-              .hint(Suggester.Hint.SRC_NODE, node.node)
+              .hint(Hint.COLL_SHARD, new Pair<>(replica.getCollection(), replica.getShard()))
+              .hint(Hint.SRC_NODE, node.node)
               .forceOperation(true);
           if (ctx.addSuggestion(suggester) == null) break;
           currentDelta -= Clause.parseLong(CORE_IDX.tagName, replica.getVariable(CORE_IDX.tagName));
         }
       }
+    } else if (ctx.violation.replicaCountDelta < 0) {
+      suggestNegativeViolations(ctx, shards -> getSortedShards(ctx,shards));
     }
   }
 
+
+
+
+  private List<String> getSortedShards(Suggestion.Ctx ctx, Set<String> shardSet) {
+    return  shardSet.stream()
+        .map(shard1 -> {
+          AtomicReference<Pair<String, Long>> result = new AtomicReference<>();
+          for (Row node : ctx.session.matrix) {
+            node.forEachShard(ctx.violation.coll, (s, ri) -> {
+              if (result.get() != null) return;
+              if (s.equals(shard1) && ri.size() > 0) {
+                Number sz = ((Number) ri.get(0).getVariable(CORE_IDX.tagName));
+                if (sz != null) result.set(new Pair<>(shard1, sz.longValue()));
+              }
+            });
+          }
+          return result.get() == null ? new Pair<>(shard1, 0L) : result.get();
+        })
+        .sorted(Comparator.comparingLong(Pair::second))
+        .map(Pair::first)
+        .collect(Collectors.toList());
+
+  }
+
   //When a replica is added, freedisk should be incremented
   @Override
   public void projectAddReplica(Cell cell, ReplicaInfo ri, Consumer<Row.OperationInfo> ops, boolean strictMode) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Operand.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Operand.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Operand.java
index d4835b8..58b72bb 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Operand.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Operand.java
@@ -99,6 +99,7 @@ public enum Operand {
   NOT_EQUAL("!", 2) {
     @Override
     public TestStatus match(Object ruleVal, Object testVal) {
+      if(testVal == null) return PASS;
       return super.match(ruleVal, testVal) == PASS ? FAIL : PASS;
     }
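A standalone sketch (not Solr's actual class) of the semantics this one-line change gives `NOT_EQUAL`: a missing tag value now passes a `!` rule. This plausibly relates to the `sysprop.fs: unknown` values added to the `TestPolicy` fixtures later in this commit, since nodes without the sysprop would otherwise behave differently under `!` rules.

enum TestStatus { PASS, FAIL }

final class NotEqualSketch {
  // mirrors the new null check in Operand.NOT_EQUAL.match above
  static TestStatus match(Object ruleVal, Object testVal) {
    if (testVal == null) return TestStatus.PASS;       // absent value passes '!'
    return ruleVal.equals(testVal) ? TestStatus.FAIL   // equal value violates '!'
                                   : TestStatus.PASS;
  }

  public static void main(String[] args) {
    System.out.println(match("slowdisk", null));       // PASS
    System.out.println(match("slowdisk", "ssd"));      // PASS
    System.out.println(match("slowdisk", "slowdisk")); // FAIL
  }
}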
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaCount.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaCount.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaCount.java
index 7e17df1..87fcf5a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaCount.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaCount.java
@@ -27,6 +27,16 @@ import org.apache.solr.common.util.Utils;
 class ReplicaCount  implements MapWriter {
   long nrt, tlog, pull;
 
+  public ReplicaCount() {
+    nrt = tlog = pull = 0;
+  }
+
+  public ReplicaCount(long nrt, long tlog, long pull) {
+    this.nrt = nrt;
+    this.tlog = tlog;
+    this.pull = pull;
+  }
+
   public long total() {
     return nrt + tlog + pull;
   }
@@ -55,19 +65,23 @@ class ReplicaCount  implements MapWriter {
   public void increment(List<ReplicaInfo> infos) {
     if (infos == null) return;
     for (ReplicaInfo info : infos) {
-      switch (info.getType()) {
-        case NRT:
-          nrt++;
-          break;
-        case PULL:
-          pull++;
-          break;
-        case TLOG:
-          tlog++;
-          break;
-        default:
-          nrt++;
-      }
+      increment(info);
+    }
+  }
+
+  void increment(ReplicaInfo info) {
+    switch (info.getType()) {
+      case NRT:
+        nrt++;
+        break;
+      case PULL:
+        pull++;
+        break;
+      case TLOG:
+        tlog++;
+        break;
+      default:
+        nrt++;
     }
   }
 
@@ -75,4 +89,12 @@ class ReplicaCount  implements MapWriter {
   public String toString() {
     return Utils.toJSONString(this);
   }
+
+  public ReplicaCount copy() {
+    return new ReplicaCount(nrt, tlog, pull);
+  }
+
+  public void reset() {
+    nrt = tlog = pull = 0;
+  }
 }
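A hedged note on why both `copy()` and `reset()` were added: in the refactored `Clause` code above, a single accumulator is reused across nodes and tag groups, so a frozen copy has to be stored in each `Violation`. A standalone mini-sketch of that pattern (not the actual class):

final class CounterSketch {
  long nrt, tlog, pull;                       // mirrors ReplicaCount's fields

  CounterSketch copy() {                      // frozen snapshot for a Violation
    CounterSketch c = new CounterSketch();
    c.nrt = nrt; c.tlog = tlog; c.pull = pull;
    return c;
  }

  void reset() { nrt = tlog = pull = 0; }     // reuse the accumulator

  public static void main(String[] args) {
    CounterSketch acc = new CounterSketch();
    acc.nrt += 2;                             // count replicas for group A
    CounterSketch snapshotA = acc.copy();     // store with the violation
    acc.reset();                              // next group starts from zero
    acc.tlog += 1;                            // count replicas for group B
    System.out.println(snapshotA.nrt + " " + acc.nrt); // prints "2 0"
  }
}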

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
index 88e9921..85d6f30 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Row.java
@@ -27,6 +27,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 
@@ -71,6 +72,12 @@ public class Row implements MapWriter {
     }
   }
 
+  public void forEachShard(String collection, BiConsumer<String, List<ReplicaInfo>> consumer) {
+    collectionVsShardVsReplicas
+        .getOrDefault(collection, Collections.emptyMap())
+        .forEach(consumer);
+  }
+
   public Row(String node, Cell[] cells, boolean anyValueMissing, Map<String,
       Map<String, List<ReplicaInfo>>> collectionVsShardVsReplicas, boolean isLive, Policy.Session session) {
     this.session = session;
@@ -98,6 +105,7 @@ public class Row implements MapWriter {
   }
 
   Object getVal(String name) {
+    if (NODE.equals(name)) return this.node;
     for (Cell cell : cells) if (cell.name.equals(name)) return cell.val;
     return null;
   }
@@ -128,11 +136,11 @@ public class Row implements MapWriter {
    * values of certain attributes will be modified, in this node as well as other nodes. Please note that
    * the state of the current session is kept intact while this operation is being performed
    *
-   * @param coll  collection name
-   * @param shard shard name
-   * @param type  replica type
+   * @param coll           collection name
+   * @param shard          shard name
+   * @param type           replica type
    * @param recursionCount the number of times we have recursed to add more replicas
-   * @param strictMode whether suggester is operating in strict mode or not
+   * @param strictMode     whether suggester is operating in strict mode or not
    */
   Row addReplica(String coll, String shard, Replica.Type type, int recursionCount, boolean strictMode) {
     if (recursionCount > 3) {
@@ -157,7 +165,7 @@ public class Row implements MapWriter {
       if (op.isAdd) {
         row = row.session.getNode(op.node).addReplica(op.coll, op.shard, op.type, recursionCount + 1, strictMode);
       } else {
-        row.session.getNode(op.node).removeReplica(op.coll, op.shard, op.type, recursionCount+1);
+        row.session.getNode(op.node).removeReplica(op.coll, op.shard, op.type, recursionCount + 1);
       }
     }
 
@@ -198,10 +206,12 @@ public class Row implements MapWriter {
     if (idx == -1) return null;
     return r.get(idx);
   }
+
   public Row removeReplica(String coll, String shard, Replica.Type type) {
-    return removeReplica(coll,shard, type, 0);
+    return removeReplica(coll, shard, type, 0);
 
   }
+
   // this simulates removing a replica from a node
   public Row removeReplica(String coll, String shard, Replica.Type type, int recursionCount) {
     if (recursionCount > 3) {
@@ -240,10 +250,21 @@ public class Row implements MapWriter {
     forEachReplica(collectionVsShardVsReplicas, consumer);
   }
 
+  public void forEachReplica(String coll, Consumer<ReplicaInfo> consumer) {
+    collectionVsShardVsReplicas.getOrDefault(coll, Collections.emptyMap()).forEach((shard, replicaInfos) -> {
+      for (ReplicaInfo replicaInfo : replicaInfos) {
+        consumer.accept(replicaInfo);
+      }
+    });
+  }
+
   public static void forEachReplica(Map<String, Map<String, List<ReplicaInfo>>> collectionVsShardVsReplicas, Consumer<ReplicaInfo> consumer) {
     collectionVsShardVsReplicas.forEach((coll, shardVsReplicas) -> shardVsReplicas
         .forEach((shard, replicaInfos) -> {
-          for (ReplicaInfo r : replicaInfos) consumer.accept(r);
+          for (int i = 0; i < replicaInfos.size(); i++) {
+            ReplicaInfo r = replicaInfos.get(i);
+            consumer.accept(r);
+          }
         }));
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
index 3b18e02..1f711e5 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggestion.java
@@ -18,7 +18,10 @@
 package org.apache.solr.client.solrj.cloud.autoscaling;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
+import java.util.function.Function;
 
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.V2RequestSupport;
@@ -54,15 +57,51 @@ public class Suggestion {
     }
   }
 
+  static void suggestNegativeViolations(Suggestion.Ctx ctx, Function<Set<String>, List<String>> shardSorter) {
+    if (ctx.violation.coll == null) return;
+    Set<String> shardSet = new HashSet<>();
+    for (Row node : ctx.session.matrix)
+      node.forEachShard(ctx.violation.coll, (s, ri) -> {
+        if (Policy.ANY.equals(ctx.violation.shard) || s.equals(ctx.violation.shard)) shardSet.add(s);
+      });
+    //Now, sort shards based on their index size ascending
+    List<String> shards = shardSorter.apply(shardSet);
+    outer:
+    for (int i = 0; i < 5; i++) {
+      int totalSuggestions = 0;
+      for (String shard : shards) {
+        Suggester suggester = ctx.session.getSuggester(MOVEREPLICA)
+            .hint(Suggester.Hint.COLL_SHARD, new Pair<>(ctx.violation.coll, shard))
+            .forceOperation(true);
+        SolrRequest op = ctx.addSuggestion(suggester);
+        if (op == null) continue;
+        totalSuggestions++;
+        boolean violationStillExists = false;
+        for (Violation violation : suggester.session.getViolations()) {
+          if (violation.getClause().original == ctx.violation.getClause().original) {
+            violationStillExists = true;
+            break;
+          }
+        }
+        if (!violationStillExists) break outer;
+      }
+      if (totalSuggestions == 0) break;
+    }
+  }
 
-  static void perNodeSuggestions(Ctx ctx) {
+
+  static void suggestPositiveViolations(Ctx ctx) {
     if (ctx.violation == null) return;
+    Double currentDelta = ctx.violation.replicaCountDelta;
     for (ReplicaInfoAndErr e : ctx.violation.getViolatingReplicas()) {
+      if (currentDelta <= 0) break;
       Suggester suggester = ctx.session.getSuggester(MOVEREPLICA)
           .forceOperation(true)
           .hint(Suggester.Hint.COLL_SHARD, new Pair<>(e.replicaInfo.getCollection(), e.replicaInfo.getShard()))
           .hint(Suggester.Hint.SRC_NODE, e.replicaInfo.getNode());
-      if (ctx.addSuggestion(suggester) == null) break;
+      if (ctx.addSuggestion(suggester) != null) {
+        currentDelta--;
+      }
     }
   }
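Taken together, the two methods above split suggestion generation by the sign of `replicaCountDelta`: positive deltas move replicas off the violating nodes until the delta is consumed, while negative deltas retry `MOVEREPLICA` per shard for up to five rounds, stopping early once the originating clause no longer reports a violation. A standalone sketch of that bounded retry shape (the two functional arguments are stand-ins for `ctx.addSuggestion(...)` and the re-check of `suggester.session.getViolations()`):

import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import java.util.function.Supplier;

final class RetryLoopSketch {
  static void suggestNegative(List<String> shardsBySizeAsc,
                              Predicate<String> addSuggestion,
                              Supplier<Boolean> violationGone) {
    outer:
    for (int round = 0; round < 5; round++) {       // bounded number of passes
      int totalSuggestions = 0;
      for (String shard : shardsBySizeAsc) {
        if (!addSuggestion.test(shard)) continue;   // no viable op for this shard
        totalSuggestions++;
        if (violationGone.get()) break outer;       // clause satisfied, stop early
      }
      if (totalSuggestions == 0) break;             // nothing movable, give up
    }
  }

  public static void main(String[] args) {
    suggestNegative(Arrays.asList("shard2", "shard1"),
        shard -> true,   // pretend every shard yields an operation
        () -> false);    // pretend the violation never clears: runs all 5 rounds
  }
}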
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
index 1ffb0a5..870483a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
@@ -31,8 +31,8 @@ import java.util.function.Consumer;
 import org.apache.solr.common.cloud.rule.ImplicitSnitch;
 
 import static java.util.Collections.emptySet;
-import static java.util.Collections.unmodifiableSet;
 import static java.util.Collections.unmodifiableMap;
+import static java.util.Collections.unmodifiableSet;
 
 /**
  * A Variable Type used in Autoscaling policy rules. Each variable type may have unique implementation
@@ -53,11 +53,8 @@ public interface Variable {
   default void projectAddReplica(Cell cell, ReplicaInfo ri, Consumer<Row.OperationInfo> opCollector, boolean strictMode) {
   }
 
-  default void addViolatingReplicas(Violation.Ctx ctx) {
-    for (Row row : ctx.allRows) {
-      if (ctx.clause.tag.varType.meta.isNodeSpecificVal() && !row.node.equals(ctx.tagKey)) continue;
-      Violation.collectViolatingReplicas(ctx, row);
-    }
+  default boolean addViolatingReplicas(Violation.Ctx ctx) {
+    return false;
   }
 
   void getSuggestions(Suggestion.Ctx ctx);
@@ -85,7 +82,10 @@ public interface Variable {
    * Type details of each variable in policies
    */
   public enum Type implements Variable {
-    @Meta(name = "withCollection", type = String.class, isNodeSpecificVal = true, implementation = WithCollectionVariable.class)
+    @Meta(name = "withCollection",
+        type = String.class,
+        isNodeSpecificVal = true,
+        implementation = WithCollectionVariable.class)
     WITH_COLLECTION(),
 
     @Meta(name = "collection",
@@ -285,8 +285,8 @@ public interface Variable {
     }
 
     @Override
-    public void addViolatingReplicas(Violation.Ctx ctx) {
-      impl.addViolatingReplicas(ctx);
+    public boolean addViolatingReplicas(Violation.Ctx ctx) {
+      return impl.addViolatingReplicas(ctx);
     }
 
     public Operand getOperand(Operand expected, Object val, ComputedType computedType) {
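The signature change above (returning `boolean` instead of `void`) reads as a handled/fallback contract: an implementation returns `true` when it has collected the violating replicas itself, and the default `false` sends the caller to the generic scan in `Clause.addViolatingReplicas`. A minimal standalone sketch of that contract (names are illustrative):

interface CollectorSketch {
  // returns true when this variable type collected the replicas itself
  default boolean addViolatingReplicas() { return false; }

  static void collect(CollectorSketch variable, Runnable genericScan) {
    if (variable.addViolatingReplicas()) return; // handled by the variable type
    genericScan.run();                           // fall back to the generic row scan
  }

  public static void main(String[] args) {
    collect(new CollectorSketch() {}, () -> System.out.println("generic scan"));
  }
}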

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/VariableBase.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/VariableBase.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/VariableBase.java
index 82a5ce6..aaa874d 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/VariableBase.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/VariableBase.java
@@ -18,11 +18,14 @@
 package org.apache.solr.client.solrj.cloud.autoscaling;
 
 
+import java.util.ArrayList;
+
 import org.apache.solr.common.cloud.rule.ImplicitSnitch;
 import org.apache.solr.common.util.StrUtils;
 
 import static org.apache.solr.client.solrj.cloud.autoscaling.Clause.parseString;
-import static org.apache.solr.client.solrj.cloud.autoscaling.Suggestion.perNodeSuggestions;
+import static org.apache.solr.client.solrj.cloud.autoscaling.Suggestion.suggestNegativeViolations;
+import static org.apache.solr.client.solrj.cloud.autoscaling.Suggestion.suggestPositiveViolations;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK;
 
 public class VariableBase implements Variable {
@@ -34,7 +37,12 @@ public class VariableBase implements Variable {
 
   @Override
   public void getSuggestions(Suggestion.Ctx ctx) {
-    perNodeSuggestions(ctx);
+    if (ctx.violation == null) return;
+    if (ctx.violation.replicaCountDelta > 0) {
+      suggestPositiveViolations(ctx);
+    } else {
+      suggestNegativeViolations(ctx, ArrayList::new);
+    }
   }
 
   static Object getOperandAdjustedValue(Object val, Object original) {
@@ -177,7 +185,7 @@ public class VariableBase implements Variable {
 
     @Override
     public void getSuggestions(Suggestion.Ctx ctx) {
-      perNodeSuggestions(ctx);
+      suggestPositiveViolations(ctx);
     }
   }
 
@@ -188,7 +196,7 @@ public class VariableBase implements Variable {
 
     @Override
     public void getSuggestions(Suggestion.Ctx ctx) {
-      perNodeSuggestions(ctx);
+      suggestPositiveViolations(ctx);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Violation.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Violation.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Violation.java
index 53f7924..e0d2048 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Violation.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Violation.java
@@ -49,29 +49,6 @@ public class Violation implements MapWriter {
     hash = ("" + coll + " " + shard + " " + node + " " + String.valueOf(tagKey) + " " + Utils.toJSONString(getClause().toMap(new HashMap<>()))).hashCode();
   }
 
-  static void collectViolatingReplicas(Ctx ctx, Row row) {
-    if (ctx.clause.tag.varType.meta.isNodeSpecificVal()) {
-      row.forEachReplica(replica -> {
-        if (ctx.clause.collection.isPass(replica.getCollection()) && ctx.clause.getShard().isPass(replica.getShard())) {
-          ctx.currentViolation.addReplica(new ReplicaInfoAndErr(replica)
-              .withDelta(ctx.clause.tag.delta(row.getVal(ctx.clause.tag.name))));
-        }
-      });
-    } else {
-      row.forEachReplica(replica -> {
-        if (ctx.clause.replica.isPass(0) && !ctx.clause.tag.isPass(row)) return;
-        if (!ctx.clause.replica.isPass(0) && ctx.clause.tag.isPass(row)) return;
-        if(!ctx.currentViolation.getClause().matchShard(replica.getShard(), ctx.currentViolation.shard)) return;
-        if (!ctx.clause.collection.isPass(ctx.currentViolation.coll) || !ctx.clause.shard.isPass(ctx.currentViolation.shard))
-          return;
-        ctx.currentViolation.addReplica(new ReplicaInfoAndErr(replica).withDelta(ctx.clause.tag.delta(row.getVal(ctx.clause.tag.name))));
-      });
-
-    }
-
-
-  }
-
   public Violation addReplica(ReplicaInfoAndErr r) {
     replicaInfoAndErrs.add(r);
     return this;
@@ -159,9 +136,9 @@ public class Violation implements MapWriter {
   @Override
   public void writeMap(EntryWriter ew) throws IOException {
     ew.putIfNotNull("collection", coll);
-    ew.putIfNotNull("shard", shard);
+    if (!Policy.ANY.equals(shard)) ew.putIfNotNull("shard", shard);
     ew.putIfNotNull("node", node);
-    ew.putStringIfNotNull("tagKey", tagKey);
+    ew.putIfNotNull("tagKey", tagKey);
     ew.putIfNotNull("violation", (MapWriter) ew1 -> {
       if (getClause().isPerCollectiontag()) ew1.put("replica", actualVal);
       else ew1.put(clause.tag.name, String.valueOf(actualVal));
@@ -179,7 +156,7 @@ public class Violation implements MapWriter {
 
   static class Ctx {
     final Function<Condition, Object> evaluator;
-    String tagKey;
+    Object tagKey;
     Clause clause;
     ReplicaCount count;
     Violation currentViolation;
@@ -192,7 +169,7 @@ public class Violation implements MapWriter {
       this.evaluator = evaluator;
     }
 
-    public Ctx reset(String tagKey, ReplicaCount count, Violation currentViolation) {
+    public Ctx resetAndAddViolation(Object tagKey, ReplicaCount count, Violation currentViolation) {
       this.tagKey = tagKey;
       this.count = count;
       this.currentViolation = currentViolation;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/WithCollectionVariable.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/WithCollectionVariable.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/WithCollectionVariable.java
index b295aee..db50726 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/WithCollectionVariable.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/WithCollectionVariable.java
@@ -82,7 +82,7 @@ public class WithCollectionVariable extends VariableBase {
     return Integer.compare(v1.getViolatingReplicas().size(), v2.getViolatingReplicas().size());
   }
 
-  public void addViolatingReplicas(Violation.Ctx ctx) {
+  public boolean addViolatingReplicas(Violation.Ctx ctx) {
     String node = ctx.currentViolation.node;
     for (Row row : ctx.allRows) {
       if (node.equals(row.node)) {
@@ -103,6 +103,7 @@ public class WithCollectionVariable extends VariableBase {
         }
       }
     }
+    return true;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index b75e1ba..a48141e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -1377,9 +1377,9 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "}");
     Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
         "node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4, sysprop.fs: slowdisk}," +
-        "node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3}," +
+        "node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3, sysprop.fs: unknown }," +
         "node3:{cores:7, freedisk: 262, heapUsage:7834, rack: rack2, sysprop.fs : ssd}," +
-        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1}" +
+        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1, sysprop.fs: unknown}" +
         "}");
     Policy policy = new Policy(policies);
     Suggester suggester = policy.createSession(getSolrCloudManager(nodeValues, clusterState))
@@ -1573,9 +1573,9 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "}");
     Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
         "node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4, sysprop.fs: slowdisk}," +
-        "node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3}," +
+        "node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3, sysprop.fs: unknown}," +
         "node3:{cores:7, freedisk: 262, heapUsage:7834, rack: rack2, sysprop.fs : ssd}," +
-        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1}" +
+        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1, sysprop.fs: unknown}" +
         "}");
     Policy policy = new Policy(policies);
     Suggester suggester = policy.createSession(getSolrCloudManager(nodeValues, clusterState))
@@ -1889,10 +1889,10 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "      {'minimize':'cores', 'precision':3}," +
         "      {'maximize':'freedisk','precision':100}]}";
     Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
-        "node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4}," +
-        "node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3}," +
+        "node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4, sysprop.fs: slowdisk}," +
+        "node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3, sysprop.fs: slowdisk}," +
         "node3:{cores:7, freedisk: 262, heapUsage:7834, rack: rack2, sysprop.fs : ssd}," +
-        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1}" +
+        "node4:{cores:8, freedisk: 375, heapUsage:16900, nodeRole:overseer, rack: rack1, sysprop.fs: slowdisk}" +
         "}");
     Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoscaleJson));
     SolrCloudManager cloudManager = getSolrCloudManager(nodeValues, clusterState);
@@ -2526,7 +2526,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
 
-  public void testFreeDiskSuggestions() {
+  public void testFreeDiskSuggestions() throws IOException {
     String dataproviderdata = "{" +
         "  liveNodes:[node1,node2]," +
         "  replicaInfo : {" +
@@ -2549,7 +2549,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
     List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData(dataproviderdata)).getViolations();
     assertEquals(1, violations.size());
     assertEquals(4, violations.get(0).getViolatingReplicas().size());
-    assertEquals(-4, violations.get(0).replicaCountDelta, 0.1);
+    assertEquals(4, violations.get(0).replicaCountDelta, 0.1);
     for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
       assertEquals(500d, r.delta, 0.1);
 
@@ -2579,11 +2579,8 @@ public class TestPolicy extends SolrTestCaseJ4 {
     assertEquals(1, violations.size());
     assertEquals(-4, violations.get(0).replicaCountDelta, 0.1);
     assertEquals(1, violations.size());
-    assertEquals(4, violations.get(0).getViolatingReplicas().size());
-    for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
-      assertEquals(500d, r.delta, 0.1);
+    assertEquals(0, violations.get(0).getViolatingReplicas().size());
 
-    }
     l = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
     assertEquals(3, l.size());
     m = l.get(0).toMap(new LinkedHashMap<>());
@@ -3653,7 +3650,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
     cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
     violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) Utils.fromJSONString(dataproviderdata))).getViolations();
     assertEquals(1, violations.size());
-    assertEquals(4, violations.get(0).getViolatingReplicas().size());
+    assertEquals(-4d, violations.get(0).replicaCountDelta, 0.01);
     for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
       assertEquals(10.0d, r.delta.doubleValue(), 0.1);
     }
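
A condensed sketch of the call sequence these assertions exercise may help in following the sign changes above. Note that `cloudManagerWithData` is a helper local to `TestPolicy`, not a public SolrJ API, and `autoScalingjson`/`dataproviderdata` are the test's own JSON fixtures:

[source,java]
----
// Sketch only: mirrors the calls visible in the diff above.
AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));

// A session evaluates the policy against the supplied cluster snapshot.
List<Violation> violations =
    cfg.getPolicy().createSession(cloudManagerWithData(dataproviderdata)).getViolations();

// replicaCountDelta encodes how many replicas must move to clear a violation;
// the assertions above pin down its expected sign and magnitude.
for (Violation v : violations) {
  System.out.println(v.replicaCountDelta + " / " + v.getViolatingReplicas().size());
}

// Suggestions are concrete operations (e.g., move-replica) computed from the same snapshot.
List<Suggester.SuggestionInfo> suggestions =
    PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
----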

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4f0320c3/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
index d6a5d51..5365e28 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
@@ -31,6 +31,7 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.google.common.collect.ImmutableSet;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.cloud.NodeStateProvider;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
@@ -57,13 +58,13 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
         "      'shard1': {" +
         "        'range': '80000000-ffffffff'," +
         "        'replicas': {" +
-        "          'r1': {" +
+        "          'r1': {" +//east
         "            'core': 'r1'," +
         "            'base_url': 'http://10.0.0.4:8983/solr'," +
         "            'node_name': 'node1'," +
         "            'state': 'active'" +
         "          }," +
-        "          'r2': {" +
+        "          'r2': {" +//west
         "            'core': 'r2'," +
         "            'base_url': 'http://10.0.0.4:7574/solr'," +
         "            'node_name': 'node2'," +
@@ -74,25 +75,25 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
         "      'shard2': {" +
         "        'range': '0-7fffffff'," +
         "        'replicas': {" +
-        "          'r3': {" +
+        "          'r3': {" +//east
         "            'core': 'r3'," +
         "            'base_url': 'http://10.0.0.4:8983/solr'," +
         "            'node_name': 'node1'," +
         "            'state': 'active'" +
         "          }," +
-        "          'r4': {" +
+        "          'r4': {" +//west
         "            'core': 'r4'," +
         "            'base_url': 'http://10.0.0.4:8987/solr'," +
         "            'node_name': 'node4'," +
         "            'state': 'active'" +
         "          }," +
-        "          'r6': {" +
+        "          'r6': {" +//east
         "            'core': 'r6'," +
         "            'base_url': 'http://10.0.0.4:8989/solr'," +
         "            'node_name': 'node3'," +
         "            'state': 'active'" +
         "          }," +
-        "          'r5': {" +
+        "          'r5': {" +//east
         "            'core': 'r5'," +
         "            'base_url': 'http://10.0.0.4:8983/solr'," +
         "            'node_name': 'node1'," +
@@ -126,7 +127,7 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     Policy.Session session = policy.createSession(createCloudManager(state, metaData));
     List<Violation> violations = session.getViolations();
     assertEquals(1, violations.size());
-    assertEquals(4, violations.get(0).getViolatingReplicas().size());
+    assertEquals(3, violations.get(0).getViolatingReplicas().size());
     assertEquals(1.0, violations.get(0).replicaCountDelta, 0.01);
     for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
       assertEquals("shard2", r.replicaInfo.getShard());
@@ -139,7 +140,7 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     session = policy.createSession(createCloudManager(state, metaData));
     violations = session.getViolations();
     assertEquals(1, violations.size());
-    assertEquals(4, violations.get(0).getViolatingReplicas().size());
+    assertEquals(3, violations.get(0).getViolatingReplicas().size());
     assertEquals(1.0, violations.get(0).replicaCountDelta, 0.01);
     for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
       assertEquals("shard2", r.replicaInfo.getShard());
@@ -526,6 +527,182 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
       }
     };
   }
+  public void testSysPropSuggestions() {
+    String diagnostics = "{" +
+        "  'diagnostics': {" +
+        "    'sortedNodes': [" +
+        "      {" +
+        "        'node': '127.0.0.1:63191_solr'," +
+        "        'isLive': true," +
+        "        'cores': 3.0," +
+        "        'sysprop.zone': 'east'," +
+        "        'freedisk': 1727.1459312438965," +
+        "        'heapUsage': 24.97510064011647," +
+        "        'sysLoadAvg': 272.75390625," +
+        "        'totaldisk': 1037.938980102539," +
+        "        'replicas': {" +
+        "          'zonesTest': {" +
+        "            'shard1': [" +
+        "              {" +
+        "                'core_node5': {" +
+        "                  'core': 'zonesTest_shard1_replica_n2'," +
+        "                  'leader': 'true'," +
+        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
+        "                  'node_name': '127.0.0.1:63191_solr'," +
+        "                  'state': 'active'," +
+        "                  'type': 'NRT'," +
+        "                  'force_set_state': 'false'," +
+        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
+        "                  'shard': 'shard1'," +
+        "                  'collection': 'zonesTest'" +
+        "                }" +
+        "              }," +
+        "              {" +
+        "                'core_node7': {" +
+        "                  'core': 'zonesTest_shard1_replica_n4'," +
+        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
+        "                  'node_name': '127.0.0.1:63191_solr'," +
+        "                  'state': 'active'," +
+        "                  'type': 'NRT'," +
+        "                  'force_set_state': 'false'," +
+        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
+        "                  'shard': 'shard1'," +
+        "                  'collection': 'zonesTest'" +
+        "                }" +
+        "              }," +
+        "              {" +
+        "                'core_node12': {" +
+        "                  'core': 'zonesTest_shard1_replica_n10'," +
+        "                  'base_url': 'https://127.0.0.1:63191/solr'," +
+        "                  'node_name': '127.0.0.1:63191_solr'," +
+        "                  'state': 'active'," +
+        "                  'type': 'NRT'," +
+        "                  'force_set_state': 'false'," +
+        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
+        "                  'shard': 'shard1'," +
+        "                  'collection': 'zonesTest'" +
+        "                }" +
+        "              }" +
+        "            ]" +
+        "          }" +
+        "        }" +
+        "      }," +
+        "      {" +
+        "        'node': '127.0.0.1:63192_solr'," +
+        "        'isLive': true," +
+        "        'cores': 3.0," +
+        "        'sysprop.zone': 'east'," +
+        "        'freedisk': 1727.1459312438965," +
+        "        'heapUsage': 24.98878807983566," +
+        "        'sysLoadAvg': 272.75390625," +
+        "        'totaldisk': 1037.938980102539," +
+        "        'replicas': {" +
+        "          'zonesTest': {" +
+        "            'shard2': [" +
+        "              {" +
+        "                'core_node3': {" +
+        "                  'core': 'zonesTest_shard1_replica_n1'," +
+        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
+        "                  'node_name': '127.0.0.1:63192_solr'," +
+        "                  'state': 'active'," +
+        "                  'type': 'NRT'," +
+        "                  'force_set_state': 'false'," +
+        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
+        "                  'shard': 'shard2'," +
+        "                  'collection': 'zonesTest'" +
+        "                }" +
+        "              }," +
+        "              {" +
+        "                'core_node9': {" +
+        "                  'core': 'zonesTest_shard1_replica_n6'," +
+        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
+        "                  'node_name': '127.0.0.1:63192_solr'," +
+        "                  'state': 'active'," +
+        "                  'type': 'NRT'," +
+        "                  'force_set_state': 'false'," +
+        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
+        "                  'shard': 'shard2'," +
+        "                  'collection': 'zonesTest'" +
+        "                }" +
+        "              }," +
+        "              {" +
+        "                'core_node11': {" +
+        "                  'core': 'zonesTest_shard1_replica_n8'," +
+        "                  'base_url': 'https://127.0.0.1:63192/solr'," +
+        "                  'node_name': '127.0.0.1:63192_solr'," +
+        "                  'state': 'active'," +
+        "                  'type': 'NRT'," +
+        "                  'force_set_state': 'false'," +
+        "                  'INDEX.sizeInGB': 6.426125764846802E-8," +
+        "                  'shard': 'shard2'," +
+        "                  'collection': 'zonesTest'" +
+        "                }" +
+        "              }" +
+        "            ]" +
+        "          }" +
+        "        }" +
+        "      }," +
+        "      {" +
+        "        'node': '127.0.0.1:63219_solr'," +
+        "        'isLive': true," +
+        "        'cores': 0.0," +
+        "        'sysprop.zone': 'west'," +
+        "        'freedisk': 1768.6174201965332," +
+        "        'heapUsage': 24.98878807983566," +
+        "        'sysLoadAvg': 272.75390625," +
+        "        'totaldisk': 1037.938980102539," +
+        "        'replicas': {}" +
+        "      }," +
+        "      {" +
+        "        'node': '127.0.0.1:63229_solr'," +
+        "        'isLive': true," +
+        "        'cores': 0.0," +
+        "        'sysprop.zone': 'west'," +
+        "        'freedisk': 1768.6174201965332," +
+        "        'heapUsage': 24.98878807983566," +
+        "        'sysLoadAvg': 272.75390625," +
+        "        'totaldisk': 1037.938980102539," +
+        "        'replicas': {}" +
+        "      }" +
+        "    ]," +
+        "    'liveNodes': [" +
+        "      '127.0.0.1:63191_solr'," +
+        "      '127.0.0.1:63192_solr'," +
+        "      '127.0.0.1:63219_solr'," +
+        "      '127.0.0.1:63229_solr'" +
+        "    ]," +
+        "    'config': {" +
+        "      'cluster-preferences': [" +
+        "        {'minimize': 'cores', 'precision': 1}," +
+        "        {'maximize': 'freedisk', 'precision': 100}," +
+        "        {'minimize': 'sysLoadAvg', 'precision': 10}" +
+        "      ]," +
+        "      'cluster-policy': [" +
+        "        {'replica': '<3', 'shard': '#EACH', 'sysprop.zone': [east, west]}" +
+        "      ]" +
+        "    }" +
+        "  }" +
+        "}";
+
+    Map<String, Object> m = (Map<String, Object>) Utils.fromJSONString(diagnostics);
+
+    Map<String, Object> conf = (Map<String, Object>) Utils.getObjectByPath(m, false, "diagnostics/config");
+    Policy policy = new Policy(conf);
+    SolrCloudManager cloudManagerFromDiagnostics = createCloudManagerFromDiagnostics(m);
+    Policy.Session session = policy.createSession(cloudManagerFromDiagnostics);
+    List<Violation> violations = session.getViolations();
+    for (Violation violation : violations) {
+      assertEquals(1.0d, violation.replicaCountDelta.doubleValue(), 0.0001);
+    }
+    assertEquals(2, violations.size());
+    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(new AutoScalingConfig(conf), cloudManagerFromDiagnostics);
+    assertEquals(2, suggestions.size());
+    for (Suggester.SuggestionInfo suggestion : suggestions) {
+      assertTrue(ImmutableSet.of("127.0.0.1:63219_solr", "127.0.0.1:63229_solr").contains(
+          Utils.getObjectByPath(suggestion, true, "operation/command/move-replica/targetNode")));
+
+    }
+  }
 
 
 }
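
Since the new `testSysPropSuggestions` drives the whole evaluation from a captured diagnostics map, the essential flow is worth isolating. `createCloudManagerFromDiagnostics` is a helper local to `TestPolicy2`; everything else mirrors the test body above:

[source,java]
----
// Sketch of the diagnostics-driven flow in testSysPropSuggestions.
Map<String, Object> m = (Map<String, Object>) Utils.fromJSONString(diagnostics);
Map<String, Object> conf = (Map<String, Object>) Utils.getObjectByPath(m, false, "diagnostics/config");

SolrCloudManager mgr = createCloudManagerFromDiagnostics(m);
List<Suggester.SuggestionInfo> suggestions =
    PolicyHelper.getSuggestions(new AutoScalingConfig(conf), mgr);

// Each suggestion is a nested map; the target node of a move-replica
// operation lives at operation/command/move-replica/targetNode.
for (Suggester.SuggestionInfo s : suggestions) {
  System.out.println(Utils.getObjectByPath(s, true, "operation/command/move-replica/targetNode"));
}
----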


[32/43] lucene-solr:jira/http2: LUCENE-8343: add CHANGES entry

Posted by da...@apache.org.
LUCENE-8343: add CHANGES entry


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d6143867
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d6143867
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d6143867

Branch: refs/heads/jira/http2
Commit: d6143867df97d61f6d4eaafcce21b2319b5de602
Parents: a955140
Author: Mike McCandless <mi...@apache.org>
Authored: Tue Sep 11 15:12:26 2018 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Tue Sep 11 15:12:26 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d6143867/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 5035625..3022a9a 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -219,6 +219,10 @@ API Changes:
 * LUCENE-8422: Static helper functions for Matches and MatchesIterator implementations
   have been moved from Matches to MatchesUtils (Alan Woodward)
 
+* LUCENE-8343: Suggesters now require Long (versus long, previously) from weight() method
+  while indexing, and provide double (versus long, previously) scores at lookup time
+  (Alessandro Benedetti)
+
 * LUCENE-8459: SearcherTaxonomyManager now has a constructor taking already opened
   IndexReaders, allowing the caller to pass a FilterDirectoryReader, for example.
   (Mike McCandless)


[27/43] lucene-solr:jira/http2: SOLR-12701: format/style consistency fixes for math expression docs; CSS change to make bold monospace appear properly

Posted by da...@apache.org.
SOLR-12701: format/style consistency fixes for math expression docs; CSS change to make bold monospace appear properly


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a619038e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a619038e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a619038e

Branch: refs/heads/jira/http2
Commit: a619038e90628f63aec3b0e813c10c5fc3f1f6bb
Parents: a1b6db2
Author: Cassandra Targett <ct...@apache.org>
Authored: Tue Sep 11 08:45:46 2018 -0500
Committer: Cassandra Targett <ct...@apache.org>
Committed: Tue Sep 11 08:47:16 2018 -0500

----------------------------------------------------------------------
 solr/solr-ref-guide/src/css/ref-guide.css       |   5 +
 solr/solr-ref-guide/src/curve-fitting.adoc      |  26 +-
 solr/solr-ref-guide/src/dsp.adoc                |  66 ++---
 solr/solr-ref-guide/src/machine-learning.adoc   | 239 +++++++++----------
 solr/solr-ref-guide/src/matrix-math.adoc        |  20 +-
 solr/solr-ref-guide/src/numerical-analysis.adoc | 114 ++++-----
 .../src/probability-distributions.adoc          | 100 ++++----
 solr/solr-ref-guide/src/regression.adoc         |  77 +++---
 solr/solr-ref-guide/src/scalar-math.adoc        |   4 +-
 solr/solr-ref-guide/src/simulations.adoc        | 125 +++++-----
 solr/solr-ref-guide/src/statistics.adoc         |  22 +-
 .../src/stream-source-reference.adoc            |   4 +-
 solr/solr-ref-guide/src/term-vectors.adoc       |  91 ++++---
 solr/solr-ref-guide/src/time-series.adoc        |  14 +-
 solr/solr-ref-guide/src/variables.adoc          |  62 ++---
 solr/solr-ref-guide/src/vector-math.adoc        |  50 ++--
 solr/solr-ref-guide/src/vectorization.adoc      |  58 +++--
 17 files changed, 498 insertions(+), 579 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/css/ref-guide.css
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/css/ref-guide.css b/solr/solr-ref-guide/src/css/ref-guide.css
index 907e2e4..4a4d777 100644
--- a/solr/solr-ref-guide/src/css/ref-guide.css
+++ b/solr/solr-ref-guide/src/css/ref-guide.css
@@ -885,6 +885,11 @@ h6 strong
     line-height: 1.45;
 }
 
+p strong code,
+td strong code {
+  font-weight: bold;
+}
+
 pre,
 pre > code
 {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/curve-fitting.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/curve-fitting.adoc b/solr/solr-ref-guide/src/curve-fitting.adoc
index 336ffde..a86f7ea 100644
--- a/solr/solr-ref-guide/src/curve-fitting.adoc
+++ b/solr/solr-ref-guide/src/curve-fitting.adoc
@@ -21,11 +21,11 @@
 
 
 The `polyfit` function is a general purpose curve fitter used to model
-the *non-linear* relationship between two random variables.
+the non-linear relationship between two random variables.
 
-The `polyfit` function is passed *x* and *y* axises and fits a smooth curve to the data.
-If only a single array is provided it is treated as the *y* axis and a sequence is generated
-for the *x* axis.
+The `polyfit` function is passed x- and y-axes and fits a smooth curve to the data.
+If only a single array is provided it is treated as the y-axis and a sequence is generated
+for the x-axis.
 
 The `polyfit` function also has a parameter that specifies the degree of the polynomial. The higher
 the degree, the more curves that can be modeled.
@@ -34,7 +34,7 @@ The example below uses the `polyfit` function to fit a curve to an array using
 a 3 degree polynomial. The fitted curve is then subtracted from the original curve. The output
 shows the error between the fitted curve and the original curve, known as the residuals.
 The output also includes the sum-of-squares of the residuals which provides a measure
-of how large the error is..
+of how large the error is.
 
 [source,text]
 ----
@@ -45,7 +45,7 @@ let(echo="residuals, sumSqError",
     sumSqError=sumSq(residuals))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -95,7 +95,7 @@ let(echo="residuals, sumSqError",
     sumSqError=sumSq(residuals))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -138,10 +138,10 @@ responds with:
 The `polyfit` function returns a function that can be used with the `predict`
 function.
 
-In the example below the x axis is included for clarity.
+In the example below the x-axis is included for clarity.
 The `polyfit` function returns a function for the fitted curve.
 The `predict` function is then used to predict a value along the curve, in this
-case the prediction is made for the *x* value of 5.
+case the prediction is made for the *`x`* value of 5.
 
 [source,text]
 ----
@@ -151,7 +151,7 @@ let(x=array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14),
     p=predict(curve, 5))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -185,7 +185,7 @@ let(x=array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14),
     d=derivative(curve))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -235,7 +235,7 @@ let(x=array(0,1,2,3,4,5,6,7,8,9, 10),
     f=gaussfit(x, y))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -283,7 +283,7 @@ let(x=array(0,1,2,3,4,5,6,7,8,9, 10),
 
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
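
For readers who want to experiment outside of streaming expressions, a rough stand-alone parallel to the `polyfit` example can be built on Apache Commons Math, which underpins much of Solr's math-expression support. The class name and sample data below are illustrative only:

[source,java]
----
import org.apache.commons.math3.analysis.polynomials.PolynomialFunction;
import org.apache.commons.math3.fitting.PolynomialCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoints;

public class PolyfitSketch {
  public static void main(String[] args) {
    double[] y = {5, 10, 60, 190, 100, 130, 100, 20, 30, 10, 5};
    WeightedObservedPoints obs = new WeightedObservedPoints();
    for (int x = 0; x < y.length; x++) {
      obs.add(x, y[x]); // generate the x-axis as a sequence, as polyfit does
    }
    // Fit a degree-3 polynomial to the points.
    double[] coefficients = PolynomialCurveFitter.create(3).fit(obs.toList());
    PolynomialFunction curve = new PolynomialFunction(coefficients);
    // Residuals: the error between the fitted curve and the original points.
    double sumSqError = 0;
    for (int x = 0; x < y.length; x++) {
      double residual = y[x] - curve.value(x);
      sumSqError += residual * residual;
    }
    System.out.println("sumSqError = " + sumSqError);
    System.out.println("prediction at x=5: " + curve.value(5));
  }
}
----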

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/dsp.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/dsp.adoc b/solr/solr-ref-guide/src/dsp.adoc
index 386ae33..60a6594 100644
--- a/solr/solr-ref-guide/src/dsp.adoc
+++ b/solr/solr-ref-guide/src/dsp.adoc
 the more advanced DSP functions, it's useful to get a better understanding of how
 The `dotProduct` function can be used to combine two arrays into a single product. A simple example can help
 illustrate this concept.
 
-In the example below two arrays are set to variables *a* and *b* and then operated on by the `dotProduct` function.
-The output of the `dotProduct` function is set to variable *c*.
+In the example below two arrays are set to variables *`a`* and *`b`* and then operated on by the `dotProduct` function.
+The output of the `dotProduct` function is set to variable *`c`*.
 
-Then the `mean` function is then used to compute the mean of the first array which is set to the variable `d`.
+Then the `mean` function is then used to compute the mean of the first array which is set to the variable *`d`*.
 
-Both the *dot product* and the *mean* are included in the output.
+Both the dot product and the mean are included in the output.
 
-When we look at the output of this expression we see that the *dot product* and the *mean* of the first array
+When we look at the output of this expression we see that the dot product and the mean of the first array
 are both 30.
 
-The dot product function *calculated the mean* of the first array.
+The `dotProduct` function calculated the mean of the first array.
 
 [source,text]
 ----
@@ -51,7 +51,7 @@ let(echo="c, d",
     d=mean(a))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -76,9 +76,9 @@ calculation using vector math and look at the output of each step.
 
 In the example below the `ebeMultiply` function performs an element-by-element multiplication of
 two arrays. This is the first step of the dot product calculation. The result of the element-by-element
-multiplication is assigned to variable *c*.
+multiplication is assigned to variable *`c`*.
 
-In the next step the `add` function adds all the elements of the array in variable *c*.
+In the next step the `add` function adds all the elements of the array in variable *`c`*.
 
 Notice that multiplying each element of the first array by .2 and then adding the results is
 equivalent to the formula for computing the mean of the first array. The formula for computing the mean
@@ -95,7 +95,7 @@ let(echo="c, d",
     d=add(c))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -122,11 +122,13 @@ When this expression is sent to the /stream handler it responds with:
 ----
 
 In the example above two arrays were combined in a way that produced the mean of the first. In the second array
-each value was set to .2. Another way of looking at this is that each value in the second array has the same weight.
-By varying the weights in the second array we can produce a different result. For example if the first array represents a time series,
+each value was set to ".2". Another way of looking at this is that each value in the second array has the same weight.
+By varying the weights in the second array we can produce a different result.
+For example if the first array represents a time series,
 the weights in the second array can be set to add more weight to a particular element in the first array.
 
-The example below creates a weighted average with the weight decreasing from right to left. Notice that the weighted mean
+The example below creates a weighted average with the weight decreasing from right to left.
+Notice that the weighted mean
 of 36.666 is larger than the previous mean, which was 30. This is because more weight was given to the last element in the
 array.
 
@@ -139,7 +141,7 @@ let(echo="c, d",
     d=add(c))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -167,13 +169,13 @@ When this expression is sent to the /stream handler it responds with:
 
 === Representing Correlation
 
-Often when we think of correlation, we are thinking of *Pearsons* correlation in the field of statistics. But the definition of
+Often when we think of correlation, we are thinking of _Pearson correlation_ in the field of statistics. But the definition of
 correlation is actually more general: a mutual relationship or connection between two or more things.
 In the field of digital signal processing the dot product is used to represent correlation. The examples below demonstrates
 how the dot product can be used to represent correlation.
 
 In the example below the dot product is computed for two vectors. Notice that the vectors have different values that fluctuate
-together. The output of the dot product is 190, which is hard to reason about because because its not scaled.
+together. The output of the dot product is 190, which is hard to reason about because it's not scaled.
 
 [source,text]
 ----
@@ -183,7 +185,7 @@ let(echo="c, d",
     c=dotProduct(a, b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -206,9 +208,9 @@ One approach to scaling the dot product is to first scale the vectors so that bo
 magnitude of 1, also called unit vectors, are used when comparing only the angle between vectors rather than the magnitude.
 The `unitize` function can be used to unitize the vectors before calculating the dot product.
 
-Notice in the example below the dot product result, set to variable *e*, is effectively 1. When applied to unit vectors the dot product
-will be scaled between 1 and -1. Also notice in the example `cosineSimilarity` is calculated on the *unscaled* vectors and the
-answer is also effectively 1. This is because *cosine similarity* is a scaled *dot product*.
+Notice in the example below the dot product result, set to variable *`e`*, is effectively 1. When applied to unit vectors the dot product
+will be scaled between 1 and -1. Also notice in the example `cosineSimilarity` is calculated on the unscaled vectors and the
+answer is also effectively 1. This is because cosine similarity is a scaled dot product.
 
 
 [source,text]
@@ -222,7 +224,7 @@ let(echo="e, f",
     f=cosineSimilarity(a, b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -254,7 +256,7 @@ let(echo="c, d",
     c=cosineSimilarity(a, b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -275,10 +277,10 @@ When this expression is sent to the /stream handler it responds with:
 
 == Convolution
 
-The `conv` function calculates the convolution of two vectors. The convolution is calculated by *reversing*
-the second vector and sliding it across the first vector. The *dot product* of the two vectors
+The `conv` function calculates the convolution of two vectors. The convolution is calculated by reversing
+the second vector and sliding it across the first vector. The dot product of the two vectors
 is calculated at each point as the second vector is slid across the first vector.
-The dot products are collected in a *third vector* which is the *convolution* of the two vectors.
+The dot products are collected in a third vector which is the convolution of the two vectors.
 
 === Moving Average Function
 
@@ -290,7 +292,7 @@ is syntactic sugar for convolution.
 Below is an example of a moving average with a window size of 5. Notice that the original vector has 13 elements
 but the result of the moving average has only 9 elements. This is because the `movingAvg` function
 only begins generating results when it has a full window. In this case, because the window size is 5, the
-moving average starts generating results from the 4th index of the original array.
+moving average starts generating results from the 4^th^ index of the original array.
 
 [source,text]
 ----
@@ -298,7 +300,7 @@ let(a=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
     b=movingAvg(a, 5))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -344,7 +346,7 @@ let(a=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
     c=conv(a, b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -381,7 +383,7 @@ When this expression is sent to the /stream handler it responds with:
 }
 ----
 
-We achieve the same result as the `movingAvg` gunction by using the `copyOfRange` function to copy a range of
+We achieve the same result as the `movingAvg` function by using the `copyOfRange` function to copy a range of
 the result that drops the first and last 4 values of
 the convolution result. In the example below the `precision` function is also used to remove floating point errors from the
 convolution result. When this is added the output is exactly the same as the `movingAvg` function.
@@ -395,7 +397,7 @@ let(a=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
     e=precision(d, 2))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -446,7 +448,7 @@ let(a=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
     c=conv(a, rev(b)))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -504,7 +506,7 @@ let(a=array(1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1),
     c=finddelay(a, b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
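
Readers who prefer to see the arithmetic spelled out can reproduce two core ideas from this page, the dot product as a weighted mean and the moving average as a sliding dot product, in a few lines of plain Java. Class name and sample values here are illustrative:

[source,java]
----
import java.util.Arrays;

public class DspSketch {
  // Dot product: the element-by-element products, summed.
  static double dot(double[] a, double[] b) {
    double s = 0;
    for (int i = 0; i < a.length; i++) s += a[i] * b[i];
    return s;
  }

  // Moving average: slide a window of equal weights across the series
  // and take the dot product at each position.
  static double[] movingAvg(double[] a, int window) {
    double[] out = new double[a.length - window + 1];
    double[] weights = new double[window];
    Arrays.fill(weights, 1.0 / window);
    for (int i = 0; i < out.length; i++) {
      out[i] = dot(Arrays.copyOfRange(a, i, i + window), weights);
    }
    return out;
  }

  public static void main(String[] args) {
    double[] a = {10, 20, 30, 40, 50};
    System.out.println(dot(a, new double[]{.2, .2, .2, .2, .2})); // 30.0: the mean of a
    System.out.println(dot(a, new double[]{.1, .1, .2, .3, .3})); // 36.0: a right-weighted mean

    double[] series = {1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1};
    System.out.println(Arrays.toString(movingAvg(series, 5)));
    // 13 inputs with a window of 5 yield 9 outputs, as the page describes.
  }
}
----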

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/machine-learning.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/machine-learning.adoc b/solr/solr-ref-guide/src/machine-learning.adoc
index abbca4b..142d617 100644
--- a/solr/solr-ref-guide/src/machine-learning.adoc
+++ b/solr/solr-ref-guide/src/machine-learning.adoc
@@ -26,13 +26,12 @@ Before performing machine learning operations its often necessary to
 scale the feature vectors so they can be compared at the same scale.
 
 All the scaling function operate on vectors and matrices.
-When operating on a matrix the *rows* of the matrix are scaled.
+When operating on a matrix the rows of the matrix are scaled.
 
 === Min/Max Scaling
 
-The `minMaxScale` function scales a vector or matrix between a min and
-max value. By default it will scale between 0 and 1 if min/max values
-are not provided.
+The `minMaxScale` function scales a vector or matrix between a minimum and maximum value.
+By default it will scale between 0 and 1 if min/max values are not provided.
 
 Below is a simple example of min/max scaling between 0 and 1.
 Notice that once brought into the same scale the vectors are the same.
@@ -79,10 +78,10 @@ This expression returns the following response:
 
 === Standardization
 
-The `standardize` function scales a vector so that it has a
-mean of 0 and a standard deviation of 1. Standardization can be
-used with machine learning algorithms, such as SVM, that
-perform better when the data has a normal distribution.
+The `standardize` function scales a vector so that it has a mean of 0 and a standard deviation of 1.
+Standardization can be used with machine learning algorithms, such as
+https://en.wikipedia.org/wiki/Support_vector_machine[Support Vector Machine (SVM)], that perform better
+when the data has a normal distribution.
 
 [source,text]
 ----
@@ -127,8 +126,7 @@ This expression returns the following response:
 === Unit Vectors
 
 The `unitize` function scales vectors to a magnitude of 1. A vector with a
-magnitude of 1 is known as a unit vector.  Unit vectors are
-preferred when the vector math deals
+magnitude of 1 is known as a unit vector. Unit vectors are preferred when the vector math deals
 with vector direction rather than magnitude.
 
 [source,text]
@@ -173,24 +171,20 @@ This expression returns the following response:
 
 == Distance and Distance Measures
 
-The `distance` function computes the distance for two
-numeric arrays or a *distance matrix* for the columns of a matrix.
+The `distance` function computes the distance for two numeric arrays or a distance matrix for the columns of a matrix.
 
-There are four distance measure functions that return a function
-that performs the actual distance calculation:
+There are five distance measure functions that return a function that performs the actual distance calculation:
 
-* euclidean (default)
-* manhattan
-* canberra
-* earthMovers
-* haversineMeters (Geospatial distance measure)
+* `euclidean` (default)
+* `manhattan`
+* `canberra`
+* `earthMovers`
+* `haversineMeters` (Geospatial distance measure)
 
 The distance measure functions can be used with all machine learning functions
-that support different distance measures.
-
-Below is an example for computing euclidean distance for
-two numeric arrays:
+that support distance measures.
 
+Below is an example for computing Euclidean distance for two numeric arrays:
 
 [source,text]
 ----
@@ -294,48 +288,46 @@ This expression returns the following response:
 }
 ----
 
-== K-means Clustering
+== K-Means Clustering
 
 The `kmeans` function performs k-means clustering of the rows of a matrix.
 Once the clustering has been completed there are a number of useful functions available
-for examining the *clusters* and *centroids*.
+for examining the clusters and centroids.
 
-The examples below are clustering *term vectors*.
-The chapter on <<term-vectors.adoc#term-vectors,Text Analysis and Term Vectors>> should be
-consulted for a full explanation of these features.
+The examples below cluster _term vectors_.
+The section <<term-vectors.adoc#term-vectors,Text Analysis and Term Vectors>> offers
+a full explanation of these features.
 
 === Centroid Features
 
 In the example below the `kmeans` function is used to cluster a result set from the Enron email data-set
 and then the top features are extracted from the cluster centroids.
 
-Let's look at what data is assigned to each variable:
-
-* *a*: The `random` function returns a sample of 500 documents from the *enron*
-collection that match the query *body:oil*. The `select` function selects the *id* and
-and annotates each tuple with the analyzed bigram terms from the body field.
-
-* *b*: The `termVectors` function creates a TF-IDF term vector matrix from the
-tuples stored in variable *a*. Each row in the matrix represents a document. The columns of the matrix
-are the bigram terms that were attached to each tuple.
-* *c*: The `kmeans` function clusters the rows of the matrix into 5 clusters. The k-means clustering is performed using the
-*Euclidean distance* measure.
-* *d*: The `getCentroids` function returns a matrix of cluster centroids. Each row in the matrix is a centroid
-from one of the 5 clusters. The columns of the matrix are the same bigrams terms of the term vector matrix.
-* *e*: The `topFeatures` function returns the column labels for the top 5 features of each centroid in the matrix.
-This returns the top 5 bigram terms for each centroid.
-
 [source,text]
 ----
-let(a=select(random(enron, q="body:oil", rows="500", fl="id, body"),
+let(a=select(random(enron, q="body:oil", rows="500", fl="id, body"), <1>
                     id,
                     analyze(body, body_bigram) as terms),
-    b=termVectors(a, maxDocFreq=.10, minDocFreq=.05, minTermLength=14, exclude="_,copyright"),
-    c=kmeans(b, 5),
-    d=getCentroids(c),
-    e=topFeatures(d, 5))
+    b=termVectors(a, maxDocFreq=.10, minDocFreq=.05, minTermLength=14, exclude="_,copyright"),<2>
+    c=kmeans(b, 5), <3>
+    d=getCentroids(c), <4>
+    e=topFeatures(d, 5)) <5>
 ----
 
+Let's look at what data is assigned to each variable:
+
+<1> *`a`*: The `random` function returns a sample of 500 documents from the "enron"
+collection that match the query "body:oil". The `select` function selects the `id`
+and annotates each tuple with the analyzed bigram terms from the `body` field.
+<2> *`b`*: The `termVectors` function creates a TF-IDF term vector matrix from the
+tuples stored in variable *`a`*. Each row in the matrix represents a document. The columns of the matrix
+are the bigram terms that were attached to each tuple.
+<3> *`c`*: The `kmeans` function clusters the rows of the matrix into 5 clusters. The k-means clustering is performed using the Euclidean distance measure.
+<4> *`d`*: The `getCentroids` function returns a matrix of cluster centroids. Each row in the matrix is a centroid
+from one of the 5 clusters. The columns of the matrix are the same bigrams terms of the term vector matrix.
+<5> *`e`*: The `topFeatures` function returns the column labels for the top 5 features of each centroid in the matrix.
+This returns the top 5 bigram terms for each centroid.
+
 This expression returns the following response:
 
 [source,json]
@@ -396,12 +388,6 @@ This expression returns the following response:
 The example below examines the top features of a specific cluster. This example uses the same techniques
 as the centroids example but the top features are extracted from a cluster rather than the centroids.
 
-The `getCluster` function returns a cluster by its index. Each cluster is a matrix containing term vectors
-that have been clustered together based on their features.
-
-In the example below the `topFeatures` function is used to extract the top 4 features from each term vector
-in the cluster.
-
 [source,text]
 ----
 let(a=select(random(collection3, q="body:oil", rows="500", fl="id, body"),
@@ -409,10 +395,15 @@ let(a=select(random(collection3, q="body:oil", rows="500", fl="id, body"),
                     analyze(body, body_bigram) as terms),
     b=termVectors(a, maxDocFreq=.09, minDocFreq=.03, minTermLength=14, exclude="_,copyright"),
     c=kmeans(b, 25),
-    d=getCluster(c, 0),
-    e=topFeatures(d, 4))
+    d=getCluster(c, 0), <1>
+    e=topFeatures(d, 4)) <2>
 ----
 
+<1> The `getCluster` function returns a cluster by its index. Each cluster is a matrix containing term vectors
+that have been clustered together based on their features.
+<2> The `topFeatures` function is used to extract the top 4 features from each term vector
+in the cluster.
+
 This expression returns the following response:
 
 [source,json]
@@ -489,19 +480,17 @@ This expression returns the following response:
 }
 ----
 
-== Multi K-means Clustering
+== Multi K-Means Clustering
 
-K-means clustering will be produce different results depending on
+K-means clustering will produce different results depending on
 the initial placement of the centroids. K-means is fast enough
 that multiple trials can be performed and the best outcome selected.
-The `multiKmeans` function runs the K-means
-clustering algorithm for a gven number of trials and selects the
-best result based on which trial produces the lowest intra-cluster
-variance.
 
-The example below is identical to centroids example except that
-it uses `multiKmeans` with 100 trials, rather then a single
-trial of the `kmeans` function.
+The `multiKmeans` function runs the k-means clustering algorithm for a given number of trials and selects the
+best result based on which trial produces the lowest intra-cluster variance.
+
+The example below is identical to the centroids example except that it uses `multiKmeans` with 100 trials,
+rather than a single trial of the `kmeans` function.
 
 [source,text]
 ----
@@ -569,10 +558,10 @@ This expression returns the following response:
 }
 ----
 
-== Fuzzy K-means Clustering
+== Fuzzy K-Means Clustering
 
 The `fuzzyKmeans` function is a soft clustering algorithm which
-allows vectors to be assigned to more then one cluster. The *fuzziness* parameter
+allows vectors to be assigned to more than one cluster. The `fuzziness` parameter
 is a value between 1 and 2 that determines how fuzzy to make the cluster assignment.
 
 After the clustering has been performed the `getMembershipMatrix` function can be called
@@ -585,27 +574,26 @@ A simple example will make this more clear. In the example below 300 documents a
 then turned into a term vector matrix. Then the `fuzzyKmeans` function clusters the
 term vectors into 12 clusters with a fuzziness factor of 1.25.
 
-The `getMembershipMatrix` function is used to return the membership matrix and the first row
-of membership matrix is retrieved with the `rowAt` function. The `precision` function is then applied to the first row
-of the matrix to make it easier to read.
-
-The output shows a single vector representing the cluster membership probabilities for the first
-term vector. Notice that the term vector has the highest association with the 12th cluster,
-but also has significant associations with the 3rd, 5th, 6th and 7th clusters.
-
 [source,text]
 ----
-et(a=select(random(collection3, q="body:oil", rows="300", fl="id, body"),
+let(a=select(random(collection3, q="body:oil", rows="300", fl="id, body"),
                    id,
                    analyze(body, body_bigram) as terms),
    b=termVectors(a, maxDocFreq=.09, minDocFreq=.03, minTermLength=14, exclude="_,copyright"),
    c=fuzzyKmeans(b, 12, fuzziness=1.25),
-   d=getMembershipMatrix(c),
-   e=rowAt(d, 0),
-   f=precision(e, 5))
+   d=getMembershipMatrix(c),  <1>
+   e=rowAt(d, 0),  <2>
+   f=precision(e, 5))  <3>
 ----
 
-This expression returns the following response:
+<1> The `getMembershipMatrix` function is used to return the membership matrix;
+<2> and the first row of membership matrix is retrieved with the `rowAt` function.
+<3> The `precision` function is then applied to the first row
+of the matrix to make it easier to read.
+
+This expression returns a single vector representing the cluster membership probabilities for the first
+term vector. Notice that the term vector has the highest association with the 12^th^ cluster,
+but also has significant associations with the 3^rd^, 5^th^, 6^th^ and 7^th^ clusters:
 
 [source,json]
 ----
@@ -637,30 +625,21 @@ This expression returns the following response:
 }
 ----
 
-== K-nearest Neighbor (KNN)
+== K-Nearest Neighbor (KNN)
 
 The `knn` function searches the rows of a matrix for the
-K-nearest neighbors of a search vector. The `knn` function
-returns a *matrix* of the K-nearest neighbors. The `knn` function
-supports changing of the distance measure by providing one of the
-four distance measure functions as the fourth parameter:
+k-nearest neighbors of a search vector. The `knn` function
+returns a matrix of the k-nearest neighbors.
 
-* euclidean (Default)
-* manhattan
-* canberra
-* earthMovers
+The `knn` function supports changing of the distance measure by providing one of these
+distance measure functions as the fourth parameter:
 
-The example below builds on the clustering examples to demonstrate
-the `knn` function.
+* `euclidean` (Default)
+* `manhattan`
+* `canberra`
+* `earthMovers`
 
-In the example, the centroids matrix is set to variable *d*. The first
-centroid vector is selected from the matrix with the `rowAt` function.
-Then the `knn` function is used to find the 3 nearest neighbors
-to the centroid vector in the term vector matrix (variable b).
-
-The `knn` function returns a matrix with the 3 nearest neighbors based on the
-default distance measure which is euclidean. Finally, the top 4 features
-of the term vectors in the nearest neighbor matrix are returned.
+The example below builds on the clustering examples to demonstrate the `knn` function.
 
 [source,text]
 ----
@@ -669,13 +648,21 @@ let(a=select(random(collection3, q="body:oil", rows="500", fl="id, body"),
                     analyze(body, body_bigram) as terms),
     b=termVectors(a, maxDocFreq=.09, minDocFreq=.03, minTermLength=14, exclude="_,copyright"),
     c=multiKmeans(b, 5, 100),
-    d=getCentroids(c),
-    e=rowAt(d, 0),
-    g=knn(b, e, 3),
-    h=topFeatures(g, 4))
+    d=getCentroids(c),  <1>
+    e=rowAt(d, 0),  <2>
+    g=knn(b, e, 3),  <3>
+    h=topFeatures(g, 4)) <4>
 ----
 
-This expression returns the following response:
+<1> In the example, the centroids matrix is set to variable *`d`*.
+<2> The first centroid vector is selected from the matrix with the `rowAt` function.
+<3> Then the `knn` function is used to find the 3 nearest neighbors
+to the centroid vector in the term vector matrix (variable *`b`*).
+<4> The `topFeatures` function is used to request the top 4 features of the term vectors in the knn matrix.
+
+The `knn` function returns a matrix with the 3 nearest neighbors based on the
+default distance measure which is euclidean. Finally, the top 4 features
+of the term vectors in the nearest neighbor matrix are returned:
 
 [source,json]
 ----
@@ -713,20 +700,18 @@ This expression returns the following response:
 }
 ----
 
-== KNN Regression
+== K-Nearest Neighbor Regression
 
-KNN regression is a non-linear, multi-variate regression method. Knn regression is a lazy learning
+K-nearest neighbor regression is a non-linear, multi-variate regression method. KNN regression is a lazy learning
 technique which means it does not fit a model to the training set in advance. Instead the
 entire training set of observations and outcomes are held in memory and predictions are made
 by averaging the outcomes of the k-nearest neighbors.
 
 The `knnRegress` function prepares the training set for use with the `predict` function.
 
-Below is an example of the `knnRegress` function. In this example 10000 random samples
-are taken each containing the variables *filesize_d*, *service_d* and *response_d*. The pairs of
-*filesize_d* and *service_d* will be used to predict the value of *response_d*.
-
-Notice that `knnRegress` returns a tuple describing the regression inputs.
+Below is an example of the `knnRegress` function. In this example 10,000 random samples
+are taken, each containing the variables `filesize_d`, `service_d` and `response_d`. The pairs of
+`filesize_d` and `service_d` will be used to predict the value of `response_d`.
 
 [source,text]
 ----
@@ -738,7 +723,7 @@ let(samples=random(collection1, q="*:*", rows="10000", fl="filesize_d, service_d
     lazyModel=knnRegress(observations, outcomes , 5))
 ----
 
-This expression returns the following response:
+This expression returns the following response. Notice that `knnRegress` returns a tuple describing the regression inputs:
 
 [source,json]
 ----
@@ -767,6 +752,7 @@ This expression returns the following response:
 === Prediction and Residuals
 
 The output of `knnRegress` can be used with the `predict` function like other regression models.
+
 In the example below the `predict` function is used to predict results for the original training
 data. The sumSq of the residuals is then calculated.
 
@@ -806,14 +792,15 @@ This expression returns the following response:
 
 If the features in the observation matrix are not in the same scale then the larger features
 will carry more weight in the distance calculation than the smaller features. This can greatly
-impact the accuracy of the prediction. The `knnRegress` function has a *scale* parameter which
-can be set to *true* to automatically scale the features in the same range.
+impact the accuracy of the prediction. The `knnRegress` function has a `scale` parameter which
+can be set to `true` to automatically scale the features in the same range.
 
 The example below shows `knnRegress` with feature scaling turned on.
-Notice that when feature scaling is turned on the sumSqErr in the output is much lower.
+
+Notice that when feature scaling is turned on the `sumSqErr` in the output is much lower.
 This shows how much more accurate the predictions are when feature scaling is turned on in
-this particular example. This is because the *filesize_d* feature is significantly larger then
-the *service_d* feature.
+this particular example. This is because the `filesize_d` feature is significantly larger than
+the `service_d` feature.
 
 [source,text]
 ----
@@ -850,16 +837,15 @@ This expression returns the following response:
 
 === Setting Robust Regression
 
-The default prediction approach is to take the *mean* of the outcomes of the k-nearest
-neighbors. If the outcomes contain outliers the *mean* value can be skewed. Setting
-the *robust* parameter to true will take the *median* outcome of the k-nearest neighbors.
+The default prediction approach is to take the mean of the outcomes of the k-nearest
+neighbors. If the outcomes contain outliers the mean value can be skewed. Setting
+the `robust` parameter to `true` will take the median outcome of the k-nearest neighbors.
 This provides a regression prediction that is robust to outliers.
 
-
 === Setting the Distance Measure
 
 The distance measure can be changed for the k-nearest neighbor search by adding a distance measure
-function to the `knnRegress` parameters. Below is an example using manhattan distance.
+function to the `knnRegress` parameters. Below is an example using `manhattan` distance.
 
 [source,text]
 ----
@@ -892,10 +878,3 @@ This expression returns the following response:
   }
 }
 ----
-
-
-
-
-
-
-
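
As a plain-Java illustration of the scaling idea this page opens with (names and data here are illustrative, not Solr internals): vectors that differ only in magnitude become identical once min/max scaled.

[source,java]
----
import java.util.Arrays;

public class MinMaxScaleSketch {
  // Default min/max scaling into [0, 1].
  static double[] minMaxScale(double[] v) {
    double min = Arrays.stream(v).min().getAsDouble();
    double max = Arrays.stream(v).max().getAsDouble();
    double[] out = new double[v.length];
    for (int i = 0; i < v.length; i++) {
      out[i] = (v[i] - min) / (max - min); // assumes max > min
    }
    return out;
  }

  public static void main(String[] args) {
    // Two vectors that differ only in magnitude...
    System.out.println(Arrays.toString(minMaxScale(new double[]{20, 30, 40, 50})));
    System.out.println(Arrays.toString(minMaxScale(new double[]{200, 300, 400, 500})));
    // ...print the same scaled values: [0.0, 0.333..., 0.666..., 1.0]
  }
}
----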

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/matrix-math.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/matrix-math.adoc b/solr/solr-ref-guide/src/matrix-math.adoc
index ba45cca..b73f01a 100644
--- a/solr/solr-ref-guide/src/matrix-math.adoc
+++ b/solr/solr-ref-guide/src/matrix-math.adoc
@@ -35,7 +35,7 @@ matrix(array(1, 2),
        array(4, 5))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -80,7 +80,7 @@ let(a=array(1, 2),
     d=colAt(c, 1))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -129,7 +129,7 @@ let(echo="d, e",
     e=getColumnLabels(c))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -182,7 +182,7 @@ let(echo="b,c",
     c=columnCount(a))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -217,7 +217,7 @@ let(a=matrix(array(1, 2),
     b=transpose(a))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -259,7 +259,7 @@ let(a=matrix(array(1, 2, 3),
     b=sumRows(a))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -292,7 +292,7 @@ let(a=matrix(array(1, 2, 3),
     b=grandSum(a))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -326,7 +326,7 @@ let(a=matrix(array(1, 2),
     b=scalarAdd(10, a))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -370,7 +370,7 @@ let(a=matrix(array(1, 2),
     b=ebeAdd(a, a))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -413,7 +413,7 @@ let(a=matrix(array(1, 2),
     c=matrixMult(a, b))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
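
The matrix operations above have direct counterparts in Apache Commons Math; below is a minimal sketch assuming commons-math3 on the classpath (illustrative only, not a claim about how Solr implements its matrix type):

[source,java]
----
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class MatrixSketch {
  public static void main(String[] args) {
    RealMatrix a = MatrixUtils.createRealMatrix(new double[][]{{1, 2}, {4, 5}});
    RealMatrix b = a.transpose();  // rows become columns
    RealMatrix c = a.multiply(b);  // 2x2 * 2x2 -> 2x2 matrix product
    System.out.println(b);
    System.out.println(c);
  }
}
----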

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/numerical-analysis.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/numerical-analysis.adoc b/solr/solr-ref-guide/src/numerical-analysis.adoc
index cb2bc2e..b4e3584 100644
--- a/solr/solr-ref-guide/src/numerical-analysis.adoc
+++ b/solr/solr-ref-guide/src/numerical-analysis.adoc
@@ -16,21 +16,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-This section of the math expression user guide covers *interpolation*, *derivatives* and *integrals*.
-These three interrelated topics are part of the field of mathematics called *numerical analysis*.
+Interpolation, derivatives and integrals are three interrelated topics which are part of the field of mathematics called numerical analysis. This section explores the math expressions available for numerical analysis.
 
 == Interpolation
 
 Interpolation is used to construct new data points between a set of known control points.
-The ability to *predict* new data points allows for *sampling* along the curve defined by the
+The ability to predict new data points allows for sampling along the curve defined by the
 control points.
 
-The interpolation functions described below all return an *interpolation model*
+The interpolation functions described below all return an _interpolation model_
 that can be passed to other functions which make use of the sampling capability.
 
 If returned directly the interpolation model returns an array containing predictions for each of the
 control points. This is useful in the case of `loess` interpolation which first smooths the control points
-and then interpolates the smoothed points. All other interpolation function simply return the original
+and then interpolates the smoothed points. All other interpolation functions simply return the original
 control points because interpolation predicts a curve that passes through the original control points.
 
 There are different algorithms for interpolation that will result in different predictions
@@ -54,29 +53,25 @@ samples every second. In order to do this the data points between the minutes mu
 The `predict` function can be used to predict values anywhere within the bounds of the interpolation
 range.  The example below shows a very simple example of upsampling.
 
-In the example linear interpolation is performed on the arrays in variables *x* and *y*. The *x* variable,
-which is the x axis, is a sequence from 0 to 20 with a stride of 2. The *y* variable defines the curve
-along the x axis.
-
-The `lerp` function performs the interpolation and returns the interpolation model.
-
-The `u` value is an array from 0 to 20 with a stride of 1. This fills in the gaps of the original x axis.
-The `predict` function then uses the interpolation function in variable *l* to predict values for
-every point in the array assigned to variable *u*.
-
-The variable *p* is the array of predictions, which is the upsampled set of y values.
-
 [source,text]
 ----
-let(x=array(0, 2,  4,  6,  8,   10, 12,  14, 16, 18, 20),
-    y=array(5, 10, 60, 190, 100, 130, 100, 20, 30, 10, 5),
-    l=lerp(x, y),
-    u=array(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),
-    p=predict(l, u))
+let(x=array(0, 2,  4,  6,  8,   10, 12,  14, 16, 18, 20),  <1>
+    y=array(5, 10, 60, 190, 100, 130, 100, 20, 30, 10, 5),  <2>
+    l=lerp(x, y),  <3>
+    u=array(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),  <4>
+    p=predict(l, u))  <5>
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+<1> In the example linear interpolation is performed on the arrays in variables *`x`* and *`y`*. The *`x`* variable,
+which is the x-axis, is a sequence from 0 to 20 with a stride of 2.
+<2> The *`y`* variable defines the curve along the x-axis.
+<3> The `lerp` function performs the interpolation and returns the interpolation model.
+<4> The `u` value is an array from 0 to 20 with a stride of 1. This fills in the gaps of the original x-axis.
+The `predict` function then uses the interpolation function in variable *`l`* to predict values for
+every point in the array assigned to variable *`u`*.
+<5> The variable *`p`* is the array of predictions, which is the upsampled set of *`y`* values.
+
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -127,21 +122,15 @@ A technique known as local regression is used to compute the smoothed curve.  Th
 neighborhood of the local regression can be adjusted
 to control how close the new curve conforms to the original control points.
 
-The `loess` function is passed *x* and *y* axises and fits a smooth curve to the data.
-If only a single array is provided it is treated as the *y* axis and a sequence is generated
-for the *x* axis.
+The `loess` function is passed *`x`*- and *`y`*-axes and fits a smooth curve to the data.
+If only a single array is provided it is treated as the *`y`*-axis and a sequence is generated
+for the *`x`*-axis.
 
-The example below uses the `loess` function to fit a curve to a set of *y* values in an array.
-The bandwidth parameter defines the percent of data to use for the local
+The example below uses the `loess` function to fit a curve to a set of *`y`* values in an array.
+The `bandwidth` parameter defines the percent of data to use for the local
 regression. The lower the percent the smaller the neighborhood used for the local
 regression and the closer the curve will be to the original data.
 
-In the example the fitted curve is subtracted from the original curve using the
-`ebeSubtract` function. The output shows the error between the
-fitted curve and the original curve, known as the residuals. The output also includes
-the sum-of-squares of the residuals which provides a measure
-of how large the error is.
-
 [source,text]
 ----
 let(echo="residuals, sumSqError",
@@ -151,8 +140,11 @@ let(echo="residuals, sumSqError",
     sumSqError=sumSq(residuals))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+In the example the fitted curve is subtracted from the original curve using the
+`ebeSubtract` function. The output shows the error between the
+fitted curve and the original curve, known as the residuals. The output also includes
+the sum-of-squares of the residuals which provides a measure
+of how large the error is:
 
 [source,json]
 ----
@@ -194,9 +186,7 @@ responds with:
 }
 ----
 
-In the next example the curve is fit using a bandwidth of .25. Notice that the curve
-is a closer fit, shown by the smaller residuals and lower value for the sum-of-squares of the
-residuals.
+In the next example the curve is fit using a `bandwidth` of `.25`:
 
 [source,text]
 ----
@@ -207,8 +197,8 @@ let(echo="residuals, sumSqError",
     sumSqError=sumSq(residuals))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+Notice that the curve is a closer fit, shown by the smaller `residuals` and lower value for the sum-of-squares of the
+residuals:
 
 [source,json]
 ----
@@ -252,11 +242,11 @@ responds with:
 
 == Derivatives
 
-The derivative of a function measures the rate of change of the *y* value in respects to the
-rate of change of the *x* value.
+The derivative of a function measures the rate of change of the *`y`* value with respect to the
+rate of change of the *`x`* value.
 
-The `derivative` function can compute the derivative of any *interpolation* function.
-The `derivative` function can also compute the derivative of a derivative.
+The `derivative` function can compute the derivative of any interpolation function.
+It can also compute the derivative of a derivative.
 
 The example below computes the derivative for a `loess` interpolation function.
 
@@ -268,7 +258,7 @@ let(x=array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
     derivative=derivative(curve))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -327,7 +317,7 @@ let(x=array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
     integral=integrate(curve,  0, 20))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -357,7 +347,7 @@ let(x=array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
     integral=integrate(curve,  0, 10))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -382,18 +372,7 @@ responds with:
 The `bicubicSpline` function can be used to interpolate and predict values
 anywhere within a grid of data.
 
-A simple example will make this more clear.
-
-In example below a bicubic spline is used to interpolate a matrix of real estate data.
-Each row of the matrix represents a specific *year*. Each column of the matrix
-represents a *floor* of the building. The grid of numbers is the average selling price of
-an apartment for each year and floor. For example in 2002 the average selling price for
-the 9th floor was 415000 (row 3, column 3).
-
-The `bicubicSpline` function is then used to
-interpolate the grid, and the `predict` function is used to predict a value for year 2003, floor 8.
-Notice that the matrix does not included a data point for year 2003, floor 8. The `bicupicSpline`
-function creates that data point based on the surrounding data in the matrix.
+A simple example will make this more clear:
 
 [source,text]
 ----
@@ -408,8 +387,16 @@ let(years=array(1998, 2000, 2002, 2004, 2006),
     prediction=predict(bspline, 2003, 8))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+In this example a bicubic spline is used to interpolate a matrix of real estate data.
+Each row of the matrix represents a specific year from the `years` array. Each column of the matrix
+represents a floor of the building from the `floors` array. The grid of numbers is the average selling price of
+an apartment for each year and floor. For example in 2002 the average selling price for
+the 9th floor was `415000` (row 3, column 3).
+
+The `bicubicSpline` function is then used to
+interpolate the grid, and the `predict` function is used to predict a value for year 2003, floor 8.
+Notice that the matrix does not include a data point for year 2003, floor 8. The `bicubicSpline`
+function creates that data point based on the surrounding data in the matrix:
 
 [source,json]
 ----
@@ -427,4 +414,3 @@ responds with:
   }
 }
 ----
-
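
The interpolation functions above are backed by Apache Commons Math under the covers, so the `lerp` example can be reproduced outside of Solr. Below is a minimal standalone Java sketch, assuming only commons-math3 on the classpath (the class and variable names are illustrative):

[source,java]
----
import org.apache.commons.math3.analysis.interpolation.LinearInterpolator;
import org.apache.commons.math3.analysis.polynomials.PolynomialSplineFunction;

public class LerpSketch {
  public static void main(String[] args) {
    // Control points: the x-axis is a sequence from 0 to 20 with a stride of 2.
    double[] x = {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20};
    double[] y = {5, 10, 60, 190, 100, 130, 100, 20, 30, 10, 5};

    // The analogue of l=lerp(x, y): build the interpolation model.
    PolynomialSplineFunction model = new LinearInterpolator().interpolate(x, y);

    // The analogue of p=predict(l, u): upsample by predicting a y value
    // for every integer point from 0 to 20, filling the gaps in the x-axis.
    for (int u = 0; u <= 20; u++) {
      System.out.printf("x=%d, predicted y=%.2f%n", u, model.value(u));
    }

    // The spline can also be differentiated analytically, which is the idea
    // behind the derivative function described on this page.
    PolynomialSplineFunction derivative = model.polynomialSplineDerivative();
    System.out.println(derivative.value(3.0));
  }
}
----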

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/probability-distributions.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/probability-distributions.adoc b/solr/solr-ref-guide/src/probability-distributions.adoc
index 5ae2248..7482872 100644
--- a/solr/solr-ref-guide/src/probability-distributions.adoc
+++ b/solr/solr-ref-guide/src/probability-distributions.adoc
@@ -17,18 +17,16 @@
 // under the License.
 
 This section of the user guide covers the
-*probability distribution
-framework* included in the math expressions library.
+probability distribution
+framework included in the math expressions library.
 
 == Probability Distribution Framework
 
-The probability distribution framework includes
-many commonly used *real* and *discrete* probability
-distributions, including support for *empirical* and
-*enumerated* distributions that model real world data.
+The probability distribution framework includes many commonly used <<Real Distributions,real>>
+and <<Discrete,discrete>> probability distributions, including support for <<Empirical Distribution,empirical>>
+and <<Enumerated Distributions,enumerated>> distributions that model real world data.
 
-The probability distribution framework also includes a set
-of functions that use the probability distributions
+The probability distribution framework also includes a set of functions that use the probability distributions
 to support probability calculations and sampling.
 
 === Real Distributions
@@ -93,18 +91,18 @@ random variable within a specific distribution.
 Below is example of calculating the cumulative probability
 of a random variable within a normal distribution.
 
-In the example a normal distribution function is created
-with a mean of 10 and a standard deviation of 5. Then
-the cumulative probability of the value 12 is calculated for this
-specific distribution.
-
 [source,text]
 ----
 let(a=normalDistribution(10, 5),
     b=cumulativeProbability(a, 12))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+In this example a normal distribution function is created
+with a mean of 10 and a standard deviation of 5. Then
+the cumulative probability of the value 12 is calculated for this
+specific distribution.
+
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -127,10 +125,10 @@ Below is an example of a cumulative probability calculation
 using an empirical distribution.
 
 In the example an empirical distribution is created from a random
-sample taken from the *price_f* field.
+sample taken from the `price_f` field.
 
-The cumulative probability of the value .75 is then calculated.
-The *price_f* field in this example was generated using a
+The cumulative probability of the value `.75` is then calculated.
+The `price_f` field in this example was generated using a
 uniform real distribution between 0 and 1, so the output of the
  `cumulativeProbability` function is very close to .75.
 
@@ -142,7 +140,7 @@ let(a=random(collection1, q="*:*", rows="30000", fl="price_f"),
     d=cumulativeProbability(c, .75))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -171,7 +169,7 @@ Below is an example which calculates the probability
 of a discrete value within a Poisson distribution.
 
 In the example a Poisson distribution function is created
-with a mean of 100. Then the
+with a mean of `100`. Then the
 probability of encountering a sample of the discrete value 101 is calculated for this
 specific distribution.
 
@@ -181,7 +179,7 @@ let(a=poissonDistribution(100),
     b=probability(a, 101))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -200,12 +198,10 @@ When this expression is sent to the /stream handler it responds with:
 }
 ----
 
-Below is an example of a probability calculation
-using an enumerated distribution.
+Below is an example of a probability calculation using an enumerated distribution.
 
 In the example an enumerated distribution is created from a random
-sample taken from the *day_i* field, which was created
-using a uniform integer distribution between 0 and 30.
+sample taken from the `day_i` field, which was created using a uniform integer distribution between 0 and 30.
 
 The probability of the discrete value 10 is then calculated.
 
@@ -217,7 +213,7 @@ let(a=random(collection1, q="*:*", rows="30000", fl="day_i"),
     d=probability(c, 10))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -239,11 +235,9 @@ When this expression is sent to the /stream handler it responds with:
 === Sampling
 
 All probability distributions support sampling. The `sample`
-function returns 1 or more random samples from a probability
-distribution.
+function returns 1 or more random samples from a probability distribution.
 
-Below is an example drawing a single sample from
-a normal distribution.
+Below is an example drawing a single sample from a normal distribution.
 
 [source,text]
 ----
@@ -251,7 +245,7 @@ let(a=normalDistribution(10, 5),
     b=sample(a))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -270,8 +264,7 @@ When this expression is sent to the /stream handler it responds with:
 }
 ----
 
-Below is an example drawing 10 samples from a normal
-distribution.
+Below is an example drawing 10 samples from a normal distribution.
 
 [source,text]
 ----
@@ -279,7 +272,7 @@ let(a=normalDistribution(10, 5),
     b=sample(a, 10))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -315,14 +308,14 @@ The multivariate normal distribution is a generalization of the
 univariate normal distribution to higher dimensions.
 
 The multivariate normal distribution models two or more random
-variables that are normally distributed. The relationship between
-the variables is defined by a covariance matrix.
+variables that are normally distributed. The relationship between the variables is defined by a covariance matrix.
 
 ==== Sampling
 
 The `sample` function can be used to draw samples
 from a multivariate normal distribution in much the same
 way as a univariate normal distribution.
+
 The difference is that each sample will be an array containing a sample
 drawn from each of the underlying normal distributions.
 If multiple samples are drawn, the `sample` function returns a matrix with a
@@ -333,33 +326,25 @@ multivariate normal distribution.
 The example below demonstrates how to initialize and draw samples
 from a multivariate normal distribution.
 
-In this example 5000 random samples are selected from a collection
-of log records. Each sample contains
-the fields *filesize_d* and *response_d*. The values of both fields conform
-to a normal distribution.
+In this example 5000 random samples are selected from a collection of log records. Each sample contains
+the fields `filesize_d` and `response_d`. The values of both fields conform to a normal distribution.
 
-Both fields are then vectorized. The *filesize_d* vector is stored in
-variable *b* and the *response_d* variable is stored in variable *c*.
+Both fields are then vectorized. The `filesize_d` vector is stored in
+variable *`b`* and the `response_d` variable is stored in variable *`c`*.
 
-An array is created that contains the *means* of the two vectorized fields.
+An array is created that contains the means of the two vectorized fields.
 
 Then both vectors are added to a matrix which is transposed. This creates
-an *observation* matrix where each row contains one observation of
-*filesize_d* and *response_d*. A covariance matrix is then created from the columns of
-the observation matrix with the
-`cov` function. The covariance matrix describes the covariance between
-*filesize_d* and *response_d*.
+an observation matrix where each row contains one observation of
+`filesize_d` and `response_d`. A covariance matrix is then created from the columns of
+the observation matrix with the `cov` function. The covariance matrix describes the covariance between
+`filesize_d` and `response_d`.
 
 The `multivariateNormalDistribution` function is then called with the
 array of means for the two fields and the covariance matrix. The model for the
-multivariate normal distribution is assigned to variable *g*.
+multivariate normal distribution is assigned to variable *`g`*.
 
-Finally five samples are drawn from the multivariate normal distribution. The samples
-are returned as a matrix, with each row representing one sample. There are two
-columns in the matrix. The first column contains samples for *filesize_d* and the second
-column contains samples for *response_d*. Over the long term the covariance between
-the columns will conform to the covariance matrix used to instantiate the
-multivariate normal distribution.
+Finally five samples are drawn from the multivariate normal distribution.
 
 [source,text]
 ----
@@ -373,7 +358,11 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, response_d"),
     h=sample(g, 5))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+The samples are returned as a matrix, with each row representing one sample. There are two
+columns in the matrix. The first column contains samples for `filesize_d` and the second
+column contains samples for `response_d`. Over the long term the covariance between
+the columns will conform to the covariance matrix used to instantiate the
+multivariate normal distribution:
 
 [source,json]
 ----
@@ -412,4 +401,3 @@ When this expression is sent to the /stream handler it responds with:
   }
 }
 ----
-
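
For readers who want to verify these numbers outside of Solr, the distribution framework maps closely onto Apache Commons Math. A minimal Java sketch of the cumulative probability, sampling, and Poisson examples above, assuming commons-math3 on the classpath:

[source,java]
----
import java.util.Arrays;

import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.distribution.PoissonDistribution;

public class DistributionSketch {
  public static void main(String[] args) {
    // normalDistribution(10, 5): mean of 10, standard deviation of 5.
    NormalDistribution normal = new NormalDistribution(10, 5);

    // cumulativeProbability(a, 12): P(X <= 12) under the distribution above.
    System.out.println(normal.cumulativeProbability(12));

    // sample(a) and sample(a, 10): draw one sample, then ten samples.
    System.out.println(normal.sample());
    System.out.println(Arrays.toString(normal.sample(10)));

    // poissonDistribution(100) and probability(a, 101): the probability
    // mass of the discrete value 101 in a Poisson with a mean of 100.
    PoissonDistribution poisson = new PoissonDistribution(100);
    System.out.println(poisson.probability(101));
  }
}
----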

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/regression.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/regression.adoc b/solr/solr-ref-guide/src/regression.adoc
index b57c62b..4ec23c3 100644
--- a/solr/solr-ref-guide/src/regression.adoc
+++ b/solr/solr-ref-guide/src/regression.adoc
@@ -16,28 +16,23 @@
 // specific language governing permissions and limitations
 // under the License.
 
-
-This section of the math expressions user guide covers simple and multivariate linear regression.
-
+The math expressions library supports simple and multivariate linear regression.
 
 == Simple Linear Regression
 
 The `regress` function is used to build a linear regression model
 between two random variables. Sample observations are provided with two
-numeric arrays. The first numeric array is the *independent variable* and
-the second array is the *dependent variable*.
+numeric arrays. The first numeric array is the independent variable and
+the second array is the dependent variable.
 
 In the example below the `random` function selects 5000 random samples each containing
-the fields *filesize_d* and *response_d*. The two fields are vectorized
-and stored in variables *b* and *c*. Then the `regress` function performs a regression
+the fields `filesize_d` and `response_d`. The two fields are vectorized
+and stored in variables *`b`* and *`c`*. Then the `regress` function performs a regression
 analysis on the two numeric arrays.
 
 The `regress` function returns a single tuple with the results of the regression
 analysis.
 
-Note that in this regression analysis the value of *RSquared* is *.75*. This means that changes in
-*filesize_d* explain 75% of the variability of the *response_d* variable.
-
 [source,text]
 ----
 let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, response_d"),
@@ -46,7 +41,8 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, response_d"),
     d=regress(b, c))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+Note that in this regression analysis the value of `RSquared` is `.75`. This means that changes in
+`filesize_d` explain 75% of the variability of the `response_d` variable:
 
 [source,json]
 ----
@@ -81,11 +77,10 @@ When this expression is sent to the /stream handler it responds with:
 
 The `predict` function uses the regression model to make predictions.
 Using the example above the regression model can be used to predict the value
-of *response_d* given a value for *filesize_d*.
+of `response_d` given a value for `filesize_d`.
 
 In the example below the `predict` function uses the regression analysis to predict
-the value of *response_d* for the *filesize_d* value of 40000.
-
+the value of `response_d` for the `filesize_d` value of `40000`.
 
 [source,text]
 ----
@@ -96,7 +91,7 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, response_d"),
     e=predict(d, 40000))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -131,7 +126,7 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, response_d"),
     e=predict(d, b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -169,9 +164,9 @@ The difference between the observed value and the predicted value is known as th
 residual. There isn't a specific function to calculate the residuals but vector
 math can be used to perform the calculation.
 
-In the example below the predictions are stored in variable *e*. The `ebeSubtract`
+In the example below the predictions are stored in variable *`e`*. The `ebeSubtract`
 function is then used to subtract the predictions
-from the actual *response_d* values stored in variable *c*. Variable *f* contains
+from the actual `response_d` values stored in variable *`c`*. Variable *`f`* contains
 the array of residuals.
 
 [source,text]
@@ -184,7 +179,7 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, response_d"),
     f=ebeSubtract(c, e))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -221,20 +216,17 @@ When this expression is sent to the /stream handler it responds with:
 == Multivariate Linear Regression
 
 The `olsRegress` function performs a multivariate linear regression analysis. Multivariate linear
-regression models the linear relationship between two or more *independent* variables and a *dependent* variable.
+regression models the linear relationship between two or more independent variables and a dependent variable.
 
 The example below extends the simple linear regression example by introducing a new independent variable
-called *service_d*. The *service_d* variable is the service level of the request and it can range from 1 to 4
+called `service_d`. The `service_d` variable is the service level of the request and it can range from 1 to 4
 in the data-set. The higher the service level, the higher the bandwidth available for the request.
 
-Notice that the two independent variables *filesize_d* and *service_d* are vectorized and stored
-in the variables *b* and *c*. The variables *b* and *c* are then added as rows to a `matrix`. The matrix is
-then transposed so that each row in the matrix represents one observation with *filesize_d* and *service_d*.
+Notice that the two independent variables `filesize_d` and `service_d` are vectorized and stored
+in the variables *`b`* and *`c`*. The variables *`b`* and *`c`* are then added as rows to a `matrix`. The matrix is
+then transposed so that each row in the matrix represents one observation with `filesize_d` and `service_d`.
 The `olsRegress` function then performs the multivariate regression analysis using the observation matrix as the
-independent variables and the *response_d* values, stored in variable *d*, as the dependent variable.
-
-Notice that the RSquared of the regression analysis is 1. This means that linear relationship between
-*filesize_d* and *service_d* describe 100% of the variability of the *response_d* variable.
+independent variables and the `response_d` values, stored in variable *`d`*, as the dependent variable.
 
 [source,text]
 ----
@@ -246,7 +238,8 @@ let(a=random(collection2, q="*:*", rows="30000", fl="filesize_d, service_d, resp
     f=olsRegress(e, d))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+Notice in the response that the RSquared of the regression analysis is 1. This means that the linear relationship between
+`filesize_d` and `service_d` describes 100% of the variability of the `response_d` variable:
 
 [source,json]
 ----
@@ -299,10 +292,11 @@ When this expression is sent to the /stream handler it responds with:
 
 === Prediction
 
-The `predict` function can also be used to make predictions for multivariate linear regression. Below is an example
-of a single prediction using the multivariate linear regression model and a single observation. The observation
-is an array that matches the structure of the observation matrix used to build the model. In this case
-the first value represent a *filesize_d* of 40000 and the second value represents a *service_d* of 4.
+The `predict` function can also be used to make predictions for multivariate linear regression.
+
+Below is an example of a single prediction using the multivariate linear regression model and a single observation.
+The observation is an array that matches the structure of the observation matrix used to build the model. In this case
+the first value represents a `filesize_d` of `40000` and the second value represents a `service_d` of `4`.
 
 [source,text]
 ----
@@ -315,7 +309,7 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, service_d, respo
     g=predict(f, array(40000, 4)))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -335,9 +329,10 @@ When this expression is sent to the /stream handler it responds with:
 ----
 
 The `predict` function can also make predictions for more than one multivariate observation. In this scenario
-an observation matrix used. In the example below the observation matrix used to build the multivariate regression model
-is passed to the `predict` function and it returns an array of predictions.
+an observation matrix is used.
 
+In the example below the observation matrix used to build the multivariate regression model
+is passed to the `predict` function and it returns an array of predictions.
 
 [source,text]
 ----
@@ -350,7 +345,7 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, service_d, respo
     g=predict(f, e))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -388,7 +383,7 @@ Once the predictions are generated the residuals can be calculated using the sam
 simple linear regression.
 
 Below is an example of the residuals calculation following a multivariate linear regression. In the example
-the predictions stored variable *g* are subtracted from observed values stored in variable *d*.
+the predictions stored in variable *`g`* are subtracted from the observed values stored in variable *`d`*.
 
 [source,text]
 ----
@@ -402,7 +397,7 @@ let(a=random(collection2, q="*:*", rows="5000", fl="filesize_d, service_d, respo
     h=ebeSubtract(d, g))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -433,7 +428,3 @@ When this expression is sent to the /stream handler it responds with:
   }
 }
 ----
-
-
-
-
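
The regression functions above map onto Apache Commons Math as well. The sketch below reproduces both flavors, simple and multivariate, in plain Java; the toy observations are invented for illustration, so the numbers will differ from the `filesize_d`/`response_d` examples:

[source,java]
----
import java.util.Arrays;

import org.apache.commons.math3.stat.regression.OLSMultipleLinearRegression;
import org.apache.commons.math3.stat.regression.SimpleRegression;

public class RegressionSketch {
  public static void main(String[] args) {
    // Simple linear regression (the analogue of regress(b, c)):
    // x is the independent variable, y the dependent variable.
    SimpleRegression simple = new SimpleRegression();
    simple.addData(1000, 10);
    simple.addData(2000, 21);
    simple.addData(3000, 29);
    simple.addData(4000, 44);
    System.out.println("RSquared: " + simple.getRSquare());

    // The analogue of predict(d, 40000): predict y for a new x value.
    System.out.println("prediction: " + simple.predict(40000));

    // Multivariate regression (the analogue of olsRegress(e, d)):
    // each row of x is one observation of the independent variables.
    OLSMultipleLinearRegression ols = new OLSMultipleLinearRegression();
    double[] y = {10, 21, 29, 44};
    double[][] x = {{1000, 1}, {2000, 3}, {3000, 2}, {4000, 4}};
    ols.newSampleData(y, x);
    System.out.println(Arrays.toString(ols.estimateRegressionParameters()));
  }
}
----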

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/scalar-math.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/scalar-math.adoc b/solr/solr-ref-guide/src/scalar-math.adoc
index 07b1eb5..b602279 100644
--- a/solr/solr-ref-guide/src/scalar-math.adoc
+++ b/solr/solr-ref-guide/src/scalar-math.adoc
@@ -26,7 +26,7 @@ For example the expression below adds two numbers together:
 add(1, 1)
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -98,7 +98,7 @@ select(search(collection2, q="*:*", fl="price_f", sort="price_f desc", rows="3")
        mult(price_f, 10) as newPrice)
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
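
Every example on this page is evaluated by sending the expression to the `/stream` handler. As a rough sketch of what that call looks like from SolrJ (the host, port, and collection name are assumptions, and error handling is omitted):

[source,java]
----
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.apache.solr.common.params.ModifiableSolrParams;

public class StreamSketch {
  public static void main(String[] args) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("expr", "add(1, 1)"); // the math expression to evaluate
    params.set("qt", "/stream");     // route the request to the /stream handler

    SolrStream stream = new SolrStream("http://localhost:8983/solr/collection2", params);
    try {
      stream.open();
      Tuple tuple;
      while (!(tuple = stream.read()).EOF) {
        System.out.println(tuple.get("return-value")); // prints 2
      }
    } finally {
      stream.close();
    }
  }
}
----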

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/simulations.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/simulations.adoc b/solr/solr-ref-guide/src/simulations.adoc
index 97d9ec5..14d7c6a 100644
--- a/solr/solr-ref-guide/src/simulations.adoc
+++ b/solr/solr-ref-guide/src/simulations.adoc
@@ -18,59 +18,59 @@
 
 
 Monte Carlo simulations are commonly used to model the behavior of
-stochastic systems. This section of the user guide describes
-how to perform both *uncorrelated* and *correlated* Monte Carlo simulations
-using the *sampling* capabilities of the probability distribution framework.
+stochastic systems. This section describes
+how to perform both uncorrelated and correlated Monte Carlo simulations
+using the sampling capabilities of the probability distribution framework.
 
 == Uncorrelated Simulations
 
 Uncorrelated Monte Carlo simulations model stochastic systems with the assumption
- that the underlying random variables move independently of each other.
- A simple example of a Monte Carlo simulation using two independently changing random variables
- is described below.
+that the underlying random variables move independently of each other.
+A simple example of a Monte Carlo simulation using two independently changing random variables
+is described below.
 
 In this example a Monte Carlo simulation is used to determine the probability that a simple hinge assembly will
 fall within a required length specification.
 
-The hinge has two components *A* and *B*. The combined length of the two components must be less then 5 centimeters
+The hinge has two components A and B. The combined length of the two components must be less than 5 centimeters
 to fall within specification.
 
-A random sampling of lengths for component *A* has shown that its length conforms to a
+A random sampling of lengths for component A has shown that its length conforms to a
 normal distribution with a mean of 2.2 centimeters and a standard deviation of .0195
 centimeters.
 
-A random sampling of lengths for component *B* has shown that its length conforms
+A random sampling of lengths for component B has shown that its length conforms
 to a normal distribution with a mean of 2.71 centimeters and a standard deviation of .0198 centimeters.
 
-The Monte Carlo simulation below performs the following steps:
-
-* A normal distribution with a mean of 2.2 and a standard deviation of .0195 is created to model the length of componentA.
-* A normal distribution with a mean of 2.71 and a standard deviation of .0198 is created to model the length of componentB.
-* The `monteCarlo` function samples from the componentA and componentB distributions and sets the values to variables sampleA and sampleB. It then
-  calls the *add(sampleA, sampleB)* function to find the combined lengths of the samples. The `monteCarlo` function runs a set number of times, 100000 in the example below, and collects the results in an array. Each
-  time the function is called new samples are drawn from the componentA
-  and componentB distributions. On each run, the `add` function adds the two samples to calculate the combined length.
-  The result of each run is collected in an array and assigned to the *simresults* variable.
-* An `empiricalDistribution` function is then created from the *simresults* array to model the distribution of the
-  simulation results.
-* Finally, the `cumulativeProbability` function is called on the *simmodel* to determine the cumulative probability
-  that the combined length of the components is 5 or less.
-* Based on the simulation there is .9994371944629039 probability that the combined length of a component pair will
-be 5 or less.
-
 [source,text]
 ----
-let(componentA=normalDistribution(2.2,  .0195),
-    componentB=normalDistribution(2.71, .0198),
-    simresults=monteCarlo(sampleA=sample(componentA),
+let(componentA=normalDistribution(2.2, .0195),  <1>
+    componentB=normalDistribution(2.71, .0198),  <2>
+    simresults=monteCarlo(sampleA=sample(componentA),  <3>
                           sampleB=sample(componentB),
-                          add(sampleA, sampleB),
-                          100000),
-    simmodel=empiricalDistribution(simresults),
-    prob=cumulativeProbability(simmodel,  5))
+                          add(sampleA, sampleB),  <4>
+                          100000),  <5>
+    simmodel=empiricalDistribution(simresults),  <6>
+    prob=cumulativeProbability(simmodel,  5))  <7>
 ----
 
-When this expression is sent to the /stream handler it responds with:
+The Monte Carlo simulation below performs the following steps:
+
+<1> A normal distribution with a mean of 2.2 and a standard deviation of .0195 is created to model the length of `componentA`.
+<2> A normal distribution with a mean of 2.71 and a standard deviation of .0198 is created to model the length of `componentB`.
+<3> The `monteCarlo` function samples from the `componentA` and `componentB` distributions and sets the values to variables `sampleA` and `sampleB`.
+<4> It then calls the `add(sampleA, sampleB)` function to find the combined lengths of the samples.
+<5> The `monteCarlo` function runs a set number of times, 100000, and collects the results in an array. Each
+  time the function is called new samples are drawn from the `componentA`
+  and `componentB` distributions. On each run, the `add` function adds the two samples to calculate the combined length.
+  The result of each run is collected in an array and assigned to the `simresults` variable.
+<6> An `empiricalDistribution` function is then created from the `simresults` array to model the distribution of the
+  simulation results.
+<7> Finally, the `cumulativeProbability` function is called on the `simmodel` to determine the cumulative probability
+  that the combined length of the components is 5 or less.
+
+Based on the simulation there is a .9994371944629039 probability that the combined length of a component pair will
+be 5 or less:
 
 [source,json]
 ----
@@ -91,36 +91,32 @@ When this expression is sent to the /stream handler it responds with:
 
 == Correlated Simulations
 
-The simulation above assumes that the lengths of *componentA* and *componentB* vary independently.
+The simulation above assumes that the lengths of `componentA` and `componentB` vary independently.
 What would happen to the probability model if there was a correlation between the lengths of
-*componentA* and *componentB*.
+`componentA` and `componentB`?
 
 In the example below a database containing assembled pairs of components is used to determine
 if there is a correlation between the lengths of the components, and how the correlation affects the model.
 
 Before performing a simulation of the effects of correlation on the probability model it's
-useful to understand what the correlation is between the lengths of *componentA* and *componentB*.
-
-In the example below 5000 random samples are selected from a collection
-of assembled hinges. Each sample contains
-lengths of the components in the fields *componentA_d* and *componentB_d*.
-
-Both fields are then vectorized. The *componentA_d* vector is stored in
-variable *b* and the *componentB_d* variable is stored in variable *c*.
-
-Then the correlation of the two vectors is calculated using the `corr` function. Note that the outcome
-from `corr` is 0.9996931313216989. This means that *componentA_d* and *componentB_d* are almost
-perfectly correlated.
+useful to understand what the correlation is between the lengths of `componentA` and `componentB`.
 
 [source,text]
 ----
-let(a=random(collection5, q="*:*", rows="5000", fl="componentA_d, componentB_d"),
-    b=col(a, componentA_d)),
+let(a=random(collection5, q="*:*", rows="5000", fl="componentA_d, componentB_d"), <1>
+    b=col(a, componentA_d), <2>
     c=col(a, componentB_d),
-    d=corr(b, c))
+    d=corr(b, c))  <3>
 ----
 
-When this expression is sent to the /stream handler it responds with:
+<1> In the example, 5000 random samples are selected from a collection of assembled hinges.
+Each sample contains lengths of the components in the fields `componentA_d` and `componentB_d`.
+<2> Both fields are then vectorized. The `componentA_d` vector is stored in
+variable *`b`* and the `componentB_d` variable is stored in variable *`c`*.
+<3> Then the correlation of the two vectors is calculated using the `corr` function.
+
+Note from the result that the outcome from `corr` is 0.9996931313216989.
+This means that `componentA_d` and `componentB_d` are almost perfectly correlated:
 
 [source,json]
 ----
@@ -139,35 +135,34 @@ When this expression is sent to the /stream handler it responds with:
 }
 ----
 
-How does correlation effect the probability model?
+=== Correlation Effects on the Probability Model
 
-The example below explores how to use a *multivariate normal distribution* function
+The example below explores how to use a multivariate normal distribution function
 to model how correlation affects the probability of hinge defects.
 
 In this example 5000 random samples are selected from a collection
 containing length data for assembled hinges. Each sample contains
-the fields *componentA_d* and *componentB_d*.
+the fields `componentA_d` and `componentB_d`.
 
-Both fields are then vectorized. The *componentA_d* vector is stored in
-variable *b* and the *componentB_d* variable is stored in variable *c*.
+Both fields are then vectorized. The `componentA_d` vector is stored in
+variable *`b`* and the `componentB_d` variable is stored in variable *`c`*.
 
-An array is created that contains the *means* of the two vectorized fields.
+An array is created that contains the means of the two vectorized fields.
 
 Then both vectors are added to a matrix which is transposed. This creates
-an *observation* matrix where each row contains one observation of
-*componentA_d* and *componentB_d*. A covariance matrix is then created from the columns of
+an observation matrix where each row contains one observation of
+`componentA_d` and `componentB_d`. A covariance matrix is then created from the columns of
 the observation matrix with the
-`cov` function. The covariance matrix describes the covariance between
-*componentA_d* and *componentB_d*.
+`cov` function. The covariance matrix describes the covariance between `componentA_d` and `componentB_d`.
 
 The `multivariateNormalDistribution` function is then called with the
 array of means for the two fields and the covariance matrix. The model
-for the multivariate normal distribution is stored in variable *g*.
+for the multivariate normal distribution is stored in variable *`g`*.
 
-The `monteCarlo` function then calls the function *add(sample(g))* 50000 times
+The `monteCarlo` function then calls the function `add(sample(g))` 50000 times
 and collects the results in a vector. Each time the function is called, a single sample
 is drawn from the multivariate normal distribution. Each sample is a vector containing
-one *componentA* and *componentB* pair. the `add` function adds the values in the vector to
+one `componentA` and `componentB` pair. The `add` function adds the values in the vector to
 calculate the length of the pair. Over the long term the samples drawn from the
 multivariate normal distribution will conform to the covariance matrix used to construct it.
 
@@ -195,7 +190,7 @@ let(a=random(hinges, q="*:*", rows="5000", fl="componentA_d, componentB_d"),
     j=cumulativeProbability(i, 5))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
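
The uncorrelated hinge simulation can also be written directly against Apache Commons Math, which may help clarify what the `monteCarlo`, `empiricalDistribution`, and `cumulativeProbability` steps are doing. A minimal sketch, assuming commons-math3 on the classpath:

[source,java]
----
import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.random.EmpiricalDistribution;

public class MonteCarloSketch {
  public static void main(String[] args) {
    // Lengths of the two hinge components, as in the uncorrelated example.
    NormalDistribution componentA = new NormalDistribution(2.2, .0195);
    NormalDistribution componentB = new NormalDistribution(2.71, .0198);

    // The analogue of monteCarlo(...): draw fresh samples from each
    // distribution and record the combined length, 100000 times.
    double[] simresults = new double[100000];
    for (int i = 0; i < simresults.length; i++) {
      simresults[i] = componentA.sample() + componentB.sample();
    }

    // Model the simulation results with an empirical distribution, then ask
    // for the cumulative probability that a pair is 5 centimeters or less.
    EmpiricalDistribution simmodel = new EmpiricalDistribution();
    simmodel.load(simresults);
    System.out.println(simmodel.cumulativeProbability(5));
  }
}
----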

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/statistics.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/statistics.adoc b/solr/solr-ref-guide/src/statistics.adoc
index 7d3ea94..af908d3 100644
--- a/solr/solr-ref-guide/src/statistics.adoc
+++ b/solr/solr-ref-guide/src/statistics.adoc
@@ -37,7 +37,7 @@ let(a=random(collection1, q="*:*", rows="1500", fl="price_f"),
     c=describe(b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -90,7 +90,7 @@ let(a=random(collection1, q="*:*", rows="15000", fl="price_f"),
     c=hist(b, 5))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -179,7 +179,7 @@ let(a=random(collection1, q="*:*", rows="15000", fl="price_f"),
      d=col(c, N))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -228,7 +228,7 @@ let(a=random(collection1, q="*:*", rows="15000", fl="day_i"),
      c=freqTable(b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -302,7 +302,7 @@ let(a=random(collection1, q="*:*", rows="15000", fl="price_f"),
      c=percentile(b, 95))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -344,7 +344,7 @@ let(a=array(1, 2, 3, 4, 5),
     c=cov(a, b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -380,7 +380,7 @@ let(a=array(1, 2, 3, 4, 5),
      e=cov(d))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -437,7 +437,7 @@ let(a=array(1, 2, 3, 4, 5),
     c=corr(a, b, type=spearmans))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -504,7 +504,7 @@ let(a=random(collection1, q="*:*", rows="1500", fl="price_f"),
     e=ttest(c, d))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -552,7 +552,7 @@ let(a=random(collection1, q="*:*", rows="1500", fl="price_f"),
     e=ttest(c, d))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -588,7 +588,7 @@ let(a=array(1,2,3),
     b=zscores(a))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
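
The statistics functions shown on this page likewise correspond closely to Apache Commons Math. A minimal Java sketch of a few of them, using made-up data and assuming commons-math3 on the classpath:

[source,java]
----
import org.apache.commons.math3.stat.StatUtils;
import org.apache.commons.math3.stat.correlation.SpearmansCorrelation;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

public class StatsSketch {
  public static void main(String[] args) {
    double[] prices = {1.5, 2.0, 2.5, 3.5, 4.0, 4.5, 5.0};

    // The analogue of describe(b): summary statistics over a vector.
    DescriptiveStatistics describe = new DescriptiveStatistics(prices);
    System.out.println("mean=" + describe.getMean()
        + " stdev=" + describe.getStandardDeviation());

    // The analogue of percentile(b, 95): the value below which
    // 95 percent of the observations fall.
    System.out.println("p95=" + StatUtils.percentile(prices, 95));

    // The analogue of corr(a, b, type=spearmans): rank correlation.
    double[] a = {1, 2, 3, 4, 5};
    double[] b = {10, 20, 30, 40, 50};
    System.out.println("corr=" + new SpearmansCorrelation().correlation(a, b));
  }
}
----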

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/stream-source-reference.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/stream-source-reference.adoc b/solr/solr-ref-guide/src/stream-source-reference.adoc
index e9185e0..042463c 100644
--- a/solr/solr-ref-guide/src/stream-source-reference.adoc
+++ b/solr/solr-ref-guide/src/stream-source-reference.adoc
@@ -216,8 +216,8 @@ The `nodes` function provides breadth-first graph traversal. For details, see th
 
 == knnSearch
 
-The `knnSearch` function returns the K nearest neighbors for a document based on text similarity. Under the covers the `knnSearch` function
-use the More Like This query parser plugin.
+The `knnSearch` function returns the k-nearest neighbors for a document based on text similarity. Under the covers the `knnSearch` function
+uses the More Like This query parser plugin.
 
 === knnSearch Parameters
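
Since `knnSearch` uses the More Like This query parser under the covers, an equivalent query can be issued directly with SolrJ. A rough sketch, where the collection, document id, and `qf` field are assumptions for illustration:

[source,java]
----
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class KnnSketch {
  public static void main(String[] args) throws Exception {
    HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr/collection2").build();
    try {
      // The More Like This query parser that knnSearch wraps: find the
      // documents most similar to document "1" over the body_t field.
      SolrQuery query = new SolrQuery("{!mlt qf=body_t}1");
      query.setRows(10); // the k nearest neighbors to return
      QueryResponse response = client.query(query);
      response.getResults().forEach(doc -> System.out.println(doc.getFieldValue("id")));
    } finally {
      client.close();
    }
  }
}
----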
 


[21/43] lucene-solr:jira/http2: Revert "Add version 7.5.1"

Posted by da...@apache.org.
Revert "Add version 7.5.1"

This reverts commit 2ffcb878b4b79a6205888653f6965807b38d4669.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1f2b344d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1f2b344d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1f2b344d

Branch: refs/heads/jira/http2
Commit: 1f2b344def296dd4ca15999e9177607d9a1abc69
Parents: 8fbeedf
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Sep 10 15:55:47 2018 +0200
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Sep 10 15:56:05 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                  |  3 ---
 .../src/java/org/apache/lucene/util/Version.java    |  7 -------
 solr/CHANGES.txt                                    | 16 ----------------
 3 files changed, 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1f2b344d/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index bcd9533..846554e 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -160,9 +160,6 @@ Optimizations
 * LUCENE-8448: Boolean queries now propagate the minimum score to their sub-scorers.
   (Jim Ferenczi, Adrien Grand)
 
-======================= Lucene 7.5.1 =======================
-(No Changes)
-
 ======================= Lucene 7.5.0 =======================
 
 API Changes:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1f2b344d/lucene/core/src/java/org/apache/lucene/util/Version.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/Version.java b/lucene/core/src/java/org/apache/lucene/util/Version.java
index 242b4ae..80368da 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Version.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Version.java
@@ -97,13 +97,6 @@ public final class Version {
   public static final Version LUCENE_7_5_0 = new Version(7, 5, 0);
 
   /**
-   * Match settings and bugs in Lucene's 7.5.1 release.
-   * @deprecated Use latest
-   */
-  @Deprecated
-  public static final Version LUCENE_7_5_1 = new Version(7, 5, 1);
-
-  /**
    * Match settings and bugs in Lucene's 8.0.0 release.
    * <p>
    * Use this to get the latest &amp; greatest settings, bug

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1f2b344d/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f5fca60..797acfc 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -71,22 +71,6 @@ Other Changes
   java.time.DateTimeFormatter instead of Joda time (see upgrade notes).  "Lenient" is enabled.  Removed Joda Time dependency.
   (David Smiley, Bar Rotstein)
 
-==================  7.5.1 ==================
-
-Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
-
-Versions of Major Components
----------------------
-Apache Tika 1.18
-Carrot2 3.16.0
-Velocity 1.7 and Velocity Tools 2.0
-Apache ZooKeeper 3.4.11
-Jetty 9.4.11.v20180605
-
-
-(No Changes)
-
-
 ==================  7.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


[03/43] lucene-solr:jira/http2: Merge remote-tracking branch 'upstream/master' into LUCENE-8343

Posted by da...@apache.org.
Merge remote-tracking branch 'upstream/master' into LUCENE-8343


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/cef9a228
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/cef9a228
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/cef9a228

Branch: refs/heads/jira/http2
Commit: cef9a2283e30a297b3add8e73ee6dba9492dcc57
Parents: 17cfa63 6e88035
Author: Alessandro Benedetti <a....@sease.io>
Authored: Thu Jun 7 16:50:58 2018 +0100
Committer: Alessandro Benedetti <a....@sease.io>
Committed: Thu Jun 7 16:50:58 2018 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  18 +
 .../lucene/analysis/hunspell/Dictionary.java    |   8 +-
 .../miscellaneous/ConcatenateGraphFilter.java   | 375 ++++++++++++++++++
 .../ConcatenateGraphFilterFactory.java          |  70 ++++
 .../miscellaneous/FingerprintFilter.java        |   4 +-
 .../miscellaneous/WordDelimiterFilter.java      |   7 +-
 .../analysis/ngram/EdgeNGramFilterFactory.java  |  10 +-
 .../analysis/ngram/EdgeNGramTokenFilter.java    |  85 ++--
 .../analysis/ngram/NGramFilterFactory.java      |  10 +-
 .../lucene/analysis/ngram/NGramTokenFilter.java |  93 +++--
 .../lucene/analysis/synonym/SynonymFilter.java  |   4 +-
 .../analysis/synonym/WordnetSynonymParser.java  |   7 +-
 ...ache.lucene.analysis.util.TokenFilterFactory |   1 +
 .../analysis/core/TestBugInSomething.java       |   2 +-
 .../lucene/analysis/core/TestRandomChains.java  |  12 +-
 .../TestConcatenateGraphFilter.java             | 168 ++++++++
 .../TestConcatenateGraphFilterFactory.java      |  83 ++++
 .../miscellaneous/TestFingerprintFilter.java    |   9 +
 .../ngram/EdgeNGramTokenFilterTest.java         | 122 ++++--
 .../analysis/ngram/NGramTokenFilterTest.java    | 118 ++++--
 .../analysis/ngram/NGramTokenizerTest.java      |   3 +-
 .../lucene/analysis/ngram/TestNGramFilters.java |  18 +-
 .../classification/BM25NBClassifierTest.java    |   2 +-
 .../CachingNaiveBayesClassifierTest.java        |   2 +-
 .../SimpleNaiveBayesClassifierTest.java         |   2 +-
 .../codecs/blockterms/BlockTermsWriter.java     |  10 +-
 .../codecs/simpletext/SimpleTextBKDWriter.java  |  10 +-
 .../simpletext/SimpleTextFieldInfosFormat.java  |  11 +-
 .../simpletext/SimpleTextSegmentInfoFormat.java |   3 +-
 .../CompressingStoredFieldsIndexReader.java     |  25 +-
 .../CompressingStoredFieldsWriter.java          |   5 +-
 .../CompressingTermVectorsWriter.java           |   8 +-
 .../lucene50/Lucene50FieldInfosFormat.java      |   2 +-
 .../lucene60/Lucene60FieldInfosFormat.java      |   8 +-
 .../lucene/index/BinaryDocValuesWriter.java     |   5 +
 .../lucene/index/BufferedUpdatesStream.java     |   2 +-
 .../org/apache/lucene/index/CheckIndex.java     |  27 +-
 .../lucene/index/DefaultIndexingChain.java      |  16 +
 .../org/apache/lucene/index/DocConsumer.java    |   9 +
 .../apache/lucene/index/DocValuesWriter.java    |   3 +
 .../lucene/index/DocumentsWriterPerThread.java  |  19 +-
 .../java/org/apache/lucene/index/FieldInfo.java |  18 +-
 .../org/apache/lucene/index/FieldInfos.java     |  51 ++-
 .../org/apache/lucene/index/IndexWriter.java    |  47 ++-
 .../org/apache/lucene/index/MultiFields.java    |   6 +-
 .../lucene/index/NumericDocValuesWriter.java    |   5 +
 .../apache/lucene/index/ParallelLeafReader.java |   7 +-
 .../org/apache/lucene/index/PendingDeletes.java |   8 +-
 .../apache/lucene/index/PendingSoftDeletes.java |  42 +-
 .../org/apache/lucene/index/ReaderPool.java     |   4 +-
 .../apache/lucene/index/ReadersAndUpdates.java  |   6 -
 .../apache/lucene/index/SegmentCommitInfo.java  |  34 +-
 .../org/apache/lucene/index/SegmentInfos.java   |  19 +-
 .../org/apache/lucene/index/SegmentMerger.java  |   1 +
 .../apache/lucene/index/SegmentWriteState.java  |   4 +-
 .../SoftDeletesDirectoryReaderWrapper.java      |  68 +++-
 .../index/SoftDeletesRetentionMergePolicy.java  |   2 +-
 .../lucene/index/SortedDocValuesWriter.java     |   5 +
 .../index/SortedNumericDocValuesWriter.java     |   5 +
 .../lucene/index/SortedSetDocValuesWriter.java  |   6 +-
 .../apache/lucene/index/SortingLeafReader.java  |   6 +-
 .../apache/lucene/search/BlendedTermQuery.java  |   8 +-
 .../apache/lucene/search/CachingCollector.java  |   9 +-
 .../org/apache/lucene/search/MaxScoreCache.java |   2 +-
 .../org/apache/lucene/search/PhraseQuery.java   |   2 +-
 .../apache/lucene/search/PointInSetQuery.java   |   2 +-
 .../apache/lucene/search/PointRangeQuery.java   |   5 +-
 .../apache/lucene/search/spans/SpanWeight.java  |   4 +-
 .../java/org/apache/lucene/util/ArrayUtil.java  | 200 +++++++++-
 .../java/org/apache/lucene/util/BytesRef.java   |   9 +-
 .../org/apache/lucene/util/BytesRefBuilder.java |   4 +-
 .../java/org/apache/lucene/util/CharsRef.java   |   3 +-
 .../org/apache/lucene/util/CharsRefBuilder.java |   4 +-
 .../org/apache/lucene/util/DocIdSetBuilder.java |   5 +-
 .../java/org/apache/lucene/util/IntsRef.java    |   4 +-
 .../java/org/apache/lucene/util/LongsRef.java   |   4 +-
 .../java/org/apache/lucene/util/PagedBytes.java |   7 +-
 .../org/apache/lucene/util/QueryBuilder.java    |   4 +-
 .../org/apache/lucene/util/RoaringDocIdSet.java |   3 +-
 .../apache/lucene/util/SparseFixedBitSet.java   |   3 +-
 .../org/apache/lucene/util/StringHelper.java    |   2 +-
 .../java/org/apache/lucene/util/Version.java    |   7 +
 .../automaton/DaciukMihovAutomatonBuilder.java  |   4 +-
 .../org/apache/lucene/util/bkd/BKDWriter.java   |  10 +-
 .../apache/lucene/util/bkd/HeapPointWriter.java |   7 +-
 .../util/packed/DeltaPackedLongValues.java      |   9 +-
 .../lucene/util/packed/MonotonicLongValues.java |  11 +-
 .../lucene/util/packed/PackedLongValues.java    |   6 +-
 .../lucene/analysis/TestCharacterUtils.java     |   4 +-
 .../AbstractTestCompressionMode.java            |  11 +-
 .../lucene/codecs/lucene50/TestForUtil.java     |  14 +-
 .../org/apache/lucene/index/TestAddIndexes.java |  46 +++
 .../org/apache/lucene/index/TestCodecs.java     |   4 +-
 .../test/org/apache/lucene/index/TestDoc.java   |   4 +-
 .../apache/lucene/index/TestFieldsReader.java   |   2 +-
 .../apache/lucene/index/TestIndexWriter.java    | 126 +++++-
 .../index/TestIndexWriterThreadsToSegments.java |   2 +-
 .../index/TestOneMergeWrappingMergePolicy.java  |   2 +-
 .../apache/lucene/index/TestPendingDeletes.java |   6 +-
 .../lucene/index/TestPendingSoftDeletes.java    |  75 +++-
 .../lucene/index/TestPerSegmentDeletes.java     |   3 +-
 .../apache/lucene/index/TestSegmentInfos.java   |   6 +-
 .../apache/lucene/index/TestSegmentMerger.java  |   4 +-
 .../TestSoftDeletesDirectoryReaderWrapper.java  |   3 +-
 .../org/apache/lucene/search/TestBoolean2.java  |   4 +-
 .../lucene/search/TestDoubleValuesSource.java   |   3 +-
 .../lucene/search/TestLongValuesSource.java     |   3 +-
 .../apache/lucene/search/TestPhraseQuery.java   |   3 +-
 .../TestSimpleExplanationsWithFillerDocs.java   |   5 +-
 .../apache/lucene/util/BaseSortTestCase.java    |   4 +-
 .../lucene/util/StressRamUsageEstimator.java    |   5 +-
 .../org/apache/lucene/util/TestArrayUtil.java   |  87 ++++
 .../org/apache/lucene/util/TestBytesRef.java    |   8 +
 .../org/apache/lucene/util/TestCharsRef.java    |   8 +
 .../org/apache/lucene/util/TestIntsRef.java     |   8 +
 .../apache/lucene/util/TestLSBRadixSorter.java  |   4 +-
 .../org/apache/lucene/util/TestLongsRef.java    |  47 +++
 .../apache/lucene/util/TestMSBRadixSorter.java  |   4 +-
 .../lucene/util/TestStringMSBRadixSorter.java   |   4 +-
 .../lucene/util/packed/TestPackedInts.java      |   5 +-
 .../lucene/expressions/TestExpressionSorts.java |   3 +-
 .../search/highlight/TermVectorLeafReader.java  |   2 +-
 lucene/ivy-versions.properties                  |   2 +-
 .../apache/lucene/index/memory/MemoryIndex.java |   4 +-
 .../org/apache/lucene/index/IndexSplitter.java  |   2 +-
 .../lucene/search/intervals/IntervalQuery.java  |   4 +-
 .../lucene/document/TestHalfFloatPoint.java     |   2 +-
 .../spatial/prefix/tree/QuadPrefixTree.java     |   4 +-
 .../spatial/spatial4j/Geo3dShapeFactory.java    |  10 +-
 .../lucene/spatial/DistanceStrategyTest.java    |   4 +-
 .../lucene/spatial/spatial4j/Geo3dRptTest.java  |  23 +-
 .../analyzing/AnalyzingInfixSuggester.java      |   2 +-
 .../suggest/document/CompletionAnalyzer.java    |  21 +-
 .../suggest/document/CompletionQuery.java       |   2 +-
 .../suggest/document/CompletionTokenStream.java | 297 +-------------
 .../search/suggest/document/ContextQuery.java   |   5 +-
 .../suggest/document/ContextSuggestField.java   |   1 +
 .../suggest/document/FuzzyCompletionQuery.java  |   7 +-
 .../suggest/document/NRTSuggesterBuilder.java   |   3 +-
 .../suggest/document/PrefixCompletionQuery.java |   5 +-
 .../search/suggest/document/SuggestField.java   |   3 +-
 .../document/CompletionTokenStreamTest.java     | 177 ---------
 .../suggest/document/TestContextQuery.java      |   3 +-
 .../document/TestContextSuggestField.java       |  13 +-
 .../suggest/document/TestSuggestField.java      |  29 +-
 .../index/BaseFieldInfoFormatTestCase.java      |  14 +-
 .../index/BaseIndexFileFormatTestCase.java      |   2 +-
 .../index/BaseLiveDocsFormatTestCase.java       |   4 +-
 .../lucene/index/BaseMergePolicyTestCase.java   |   2 +-
 .../index/BaseStoredFieldsFormatTestCase.java   |   2 +-
 .../lucene/index/MismatchedLeafReader.java      |   3 +-
 .../lucene/index/RandomPostingsTester.java      |   4 +-
 .../lucene/search/BlockScoreQueryWrapper.java   |  10 +-
 .../lucene/store/MockDirectoryWrapper.java      |   2 +-
 .../util/automaton/AutomatonTestUtil.java       |   4 +-
 lucene/tools/forbiddenApis/lucene.txt           |  24 ++
 solr/CHANGES.txt                                | 100 ++++-
 solr/NOTICE.txt                                 |  14 +
 .../field/AbstractAnalyticsFieldTest.java       |  41 +-
 .../org/apache/solr/cloud/RecoveryStrategy.java |  29 +-
 .../apache/solr/cloud/ReplicateFromLeader.java  |   2 +-
 .../cloud/autoscaling/ComputePlanAction.java    |   8 +-
 .../org/apache/solr/core/CoreContainer.java     |  39 +-
 .../src/java/org/apache/solr/core/SolrCore.java |   2 +-
 .../apache/solr/core/SolrResourceLoader.java    |  12 +-
 .../solr/handler/admin/CollectionsHandler.java  |   5 +-
 .../solr/handler/component/ExpandComponent.java |  30 +-
 .../solr/handler/tagger/OffsetCorrector.java    | 178 +++++++++
 .../solr/handler/tagger/TagClusterReducer.java  | 103 +++++
 .../org/apache/solr/handler/tagger/TagLL.java   | 176 ++++++++
 .../org/apache/solr/handler/tagger/Tagger.java  | 230 +++++++++++
 .../handler/tagger/TaggerRequestHandler.java    | 397 +++++++++++++++++++
 .../solr/handler/tagger/TaggingAttribute.java   |  65 +++
 .../handler/tagger/TaggingAttributeImpl.java    |  79 ++++
 .../solr/handler/tagger/TermPrefixCursor.java   | 189 +++++++++
 .../solr/handler/tagger/XmlOffsetCorrector.java | 113 ++++++
 .../solr/handler/tagger/package-info.java       |  27 ++
 .../transform/ChildDocTransformerFactory.java   |  11 +-
 .../apache/solr/schema/CurrencyFieldType.java   |  18 +-
 .../solr/search/CollapsingQParserPlugin.java    |   2 +-
 .../java/org/apache/solr/search/Insanity.java   |   2 +-
 .../org/apache/solr/search/QParserPlugin.java   |  80 ++--
 .../apache/solr/search/QueryWrapperFilter.java  | 106 -----
 .../apache/solr/search/TermsQParserPlugin.java  |  22 +-
 .../solr/security/PKIAuthenticationPlugin.java  |  40 +-
 .../apache/solr/security/PublicKeyHandler.java  |  47 +++
 .../org/apache/solr/servlet/HttpSolrCall.java   |   4 +-
 .../apache/solr/servlet/LoadAdminUiServlet.java |   6 +-
 .../apache/solr/servlet/SolrDispatchFilter.java |  43 +-
 .../solr/uninverting/UninvertingReader.java     |   2 +-
 .../apache/solr/update/CdcrTransactionLog.java  |  20 +-
 .../org/apache/solr/update/CdcrUpdateLog.java   |   3 -
 .../apache/solr/update/HdfsTransactionLog.java  |  18 +-
 .../org/apache/solr/update/HdfsUpdateLog.java   |  84 ++--
 .../org/apache/solr/update/TransactionLog.java  |  56 +--
 .../java/org/apache/solr/update/UpdateLog.java  | 255 ++++++------
 .../solr/collection1/conf/schema-tagger.xml     | 187 +++++++++
 .../solr/collection1/conf/solrconfig-tagger.xml |  59 +++
 .../apache/solr/cloud/RollingRestartTest.java   |  10 +-
 .../autoscaling/AutoScalingHandlerTest.java     |  21 +
 .../cloud/autoscaling/sim/SimCloudManager.java  |   2 +-
 .../sim/SimClusterStateProvider.java            |  50 ++-
 .../autoscaling/sim/SimNodeStateProvider.java   | 128 +++---
 .../autoscaling/sim/SimSolrCloudTestCase.java   |  56 ++-
 .../sim/TestClusterStateProvider.java           |   6 +-
 .../cloud/autoscaling/sim/TestLargeCluster.java | 118 ++++--
 .../apache/solr/core/ResourceLoaderTest.java    |   6 +-
 .../tagger/EmbeddedSolrNoSerializeTest.java     | 153 +++++++
 .../handler/tagger/RandomizedTaggerTest.java    | 150 +++++++
 .../apache/solr/handler/tagger/Tagger2Test.java | 176 ++++++++
 .../apache/solr/handler/tagger/TaggerTest.java  | 296 ++++++++++++++
 .../solr/handler/tagger/TaggerTestCase.java     | 251 ++++++++++++
 .../handler/tagger/TaggingAttributeTest.java    |  73 ++++
 .../handler/tagger/WordLengthTaggingFilter.java | 110 +++++
 .../tagger/WordLengthTaggingFilterFactory.java  |  67 ++++
 .../handler/tagger/XmlInterpolationTest.java    | 224 +++++++++++
 .../solr/search/TestQueryWrapperFilter.java     | 241 -----------
 .../org/apache/solr/search/TestRecovery.java    |  58 +--
 .../apache/solr/search/TestRecoveryHdfs.java    |  46 +--
 .../solr/search/TestStandardQParsers.java       |  10 +-
 .../security/TestPKIAuthenticationPlugin.java   |   7 +-
 .../apache/solr/update/TransactionLogTest.java  |  14 +-
 solr/licenses/metrics-core-3.2.2.jar.sha1       |   1 -
 solr/licenses/metrics-core-3.2.6.jar.sha1       |   1 +
 solr/licenses/metrics-ganglia-3.2.2.jar.sha1    |   1 -
 solr/licenses/metrics-ganglia-3.2.6.jar.sha1    |   1 +
 solr/licenses/metrics-graphite-3.2.2.jar.sha1   |   1 -
 solr/licenses/metrics-graphite-3.2.6.jar.sha1   |   1 +
 solr/licenses/metrics-jetty9-3.2.2.jar.sha1     |   1 -
 solr/licenses/metrics-jetty9-3.2.6.jar.sha1     |   1 +
 solr/licenses/metrics-jvm-3.2.2.jar.sha1        |   1 -
 solr/licenses/metrics-jvm-3.2.6.jar.sha1        |   1 +
 solr/solr-ref-guide/src/_includes/head.html     |   1 -
 solr/solr-ref-guide/src/_layouts/page.html      |  12 -
 solr/solr-ref-guide/src/collections-api.adoc    |  42 ++
 solr/solr-ref-guide/src/css/comments.css        | 160 --------
 solr/solr-ref-guide/src/searching.adoc          |  33 +-
 solr/solr-ref-guide/src/the-tagger-handler.adoc | 265 +++++++++++++
 .../client/solrj/cloud/autoscaling/Clause.java  |   2 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |   7 +-
 .../solr/common/cloud/ClusterProperties.java    |  18 +
 .../apache/solr/common/cloud/ZkStateReader.java |   6 +
 242 files changed, 6843 insertions(+), 2181 deletions(-)
----------------------------------------------------------------------



[28/43] lucene-solr:jira/http2: LUCENE-8343: change suggesters to use Long instead of long weight during indexing, and double instead of long score at suggest time

Posted by da...@apache.org.
LUCENE-8343: change suggesters to use Long instead of long weight during indexing, and double instead of long score at suggest time


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/398074d0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/398074d0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/398074d0

Branch: refs/heads/jira/http2
Commit: 398074d0f878d4ba262245e35fa1ea285e52b791
Parents: a619038 1a83a14
Author: Mike McCandless <mi...@apache.org>
Authored: Tue Sep 11 12:03:40 2018 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Tue Sep 11 12:03:40 2018 -0400

----------------------------------------------------------------------
 lucene/MIGRATE.txt                              |  5 ++
 .../analyzing/BlendedInfixSuggester.java        |  7 +-
 .../analyzing/BlendedInfixSuggesterTest.java    | 73 +++++++++++++++-----
 solr/solr-ref-guide/src/suggester.adoc          |  8 ++-
 4 files changed, 73 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/398074d0/lucene/MIGRATE.txt
----------------------------------------------------------------------
diff --cc lucene/MIGRATE.txt
index 90ed4fc,8467d72..e0e70a3
--- a/lucene/MIGRATE.txt
+++ b/lucene/MIGRATE.txt
@@@ -76,45 -76,3 +76,50 @@@ separate query
  Thanks to other optimizations that were added to Lucene 8, this query will be
  able to efficiently select the top-scoring document without having to visit
  all matches.
 +
 +## TopFieldCollector always assumes fillFields=true ##
 +
 +Because filling sort values doesn't have a significant overhead, the fillFields
 +option has been removed from TopFieldCollector factory methods. Everything
 +behaves as if it was set to true.
 +
 +## TopFieldCollector no longer takes a trackDocScores option ##
 +
 +Computing scores at collection time is less efficient than running a second
 +request in order to only compute scores for documents that made it to the top
 +hits. As a consequence, the trackDocScores option has been removed and can be
 +replaced with the new TopFieldCollector#populateScores helper method.
 +
 +## IndexSearcher.search(After) may return lower bounds of the hit count and TopDocs.totalHits is no longer a long ##
 +
 +Lucene 8 received optimizations for collection of top-k matches by not visiting
 +all matches. However, these optimizations won't help if all matches still need
 +to be visited in order to compute the total number of hits. As a consequence,
 +IndexSearcher's search and searchAfter methods were changed to only count hits
 +accurately up to 1,000, and TopDocs.totalHits was changed from a long to an
 +object that says whether the hit count is accurate or a lower bound of the
 +actual hit count.
 +
 +## RAMDirectory, RAMFile, RAMInputStream, RAMOutputStream are deprecated ##
 +
 +This RAM-based directory implementation is an old piece of code that uses inefficient
 +thread synchronization primitives and can be confused as "faster" than the NIO-based
 +MMapDirectory. It is deprecated and scheduled for removal in future versions of 
 +Lucene. (LUCENE-8467, LUCENE-8438)
 +
 +## LeafCollector.setScorer() now takes a Scorable rather than a Scorer ##
 +
 +Scorer has a number of methods that should never be called from Collectors, for example
 +those that advance the underlying iterators.  To hide these, LeafCollector.setScorer()
 +now takes a Scorable, an abstract class that Scorers can extend, with methods
 +docID() and score(). (LUCENE-6228)
 +
 +## Scorers must have non-null Weights ##
 +
 +If a custom Scorer implementation does not have an associated Weight, it can probably
 +be replaced with a Scorable instead.
++
++## Suggesters now return Long instead of long for weight() during indexing, and double
++instead of long at suggest time ##
++
++Most code should just require recompilation, though some added casts may be needed.
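
For readers adapting code to the MIGRATE.txt entries above, here is a minimal, illustrative Java sketch (not part of this patch) of the TopDocs.totalHits change; the searcher and query are assumed to be supplied by the caller:

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;

    class TotalHitsExample {
      static void printHitCount(IndexSearcher searcher, Query query) throws IOException {
        TopDocs topDocs = searcher.search(query, 10);
        TotalHits totalHits = topDocs.totalHits;  // now an object, no longer a long
        if (totalHits.relation == TotalHits.Relation.EQUAL_TO) {
          System.out.println("exactly " + totalHits.value + " hits");
        } else {
          // the count was only tracked up to a threshold, so this is a lower bound
          System.out.println("at least " + totalHits.value + " hits");
        }
      }
    }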


[09/43] lucene-solr:jira/http2: LUCENE-8491: Adjust maxRadius to prevent constructing circles that are too big

Posted by da...@apache.org.
LUCENE-8491: Adjust maxRadius to prevent constructing circles that are too big


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6fbcda60
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6fbcda60
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6fbcda60

Branch: refs/heads/jira/http2
Commit: 6fbcda60a21834d9259e78c97ca71e7d80689c68
Parents: 66c671e
Author: iverase <iv...@apache.org>
Authored: Fri Sep 7 14:48:52 2018 +0200
Committer: iverase <iv...@apache.org>
Committed: Fri Sep 7 14:48:52 2018 +0200

----------------------------------------------------------------------
 .../spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6fbcda60/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java
index 94fdde9..e1234a4 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dShapeWGS84ModelRectRelationTest.java
@@ -38,7 +38,7 @@ public class Geo3dShapeWGS84ModelRectRelationTest extends ShapeRectRelationTestC
     Geo3dSpatialContextFactory factory = new Geo3dSpatialContextFactory();
     factory.planetModel = planetModel;
     this.ctx = factory.newSpatialContext();
-    this.maxRadius = 178;
+    this.maxRadius = 175;
     ((Geo3dShapeFactory)ctx.getShapeFactory()).setCircleAccuracy(1e-12);
   }
 


[26/43] lucene-solr:jira/http2: SOLR-12701: format/style consistency fixes for math expression docs; CSS change to make bold monospace appear properly

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/term-vectors.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/term-vectors.adoc b/solr/solr-ref-guide/src/term-vectors.adoc
index 32c4bfa..2510099 100644
--- a/solr/solr-ref-guide/src/term-vectors.adoc
+++ b/solr/solr-ref-guide/src/term-vectors.adoc
@@ -16,9 +16,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-TF-IDF term vectors are often used to represent text documents when performing text mining
-and machine learning operations. This section of the user guide describes how to
-use math expressions to perform text analysis and create TF-IDF term vectors.
+Term frequency-inverse document frequency (TF-IDF) term vectors are often used to
+represent text documents when performing text mining and machine learning operations. The math expressions
+library can be used to perform text analysis and create TF-IDF term vectors.
 
 == Text Analysis
 
@@ -26,17 +26,16 @@ The `analyze` function applies a Solr analyzer to a text field and returns the t
 emitted by the analyzer in an array. Any analyzer chain that is attached to a field in Solr's
 schema can be used with the `analyze` function.
 
-In the example below, the text "hello world" is analyzed using the analyzer chain attached to the *subject* field in
-the schema. The *subject* field is defined as the field type *text_general* and the text is analyzed using the
-analysis chain configured for the *text_general* field type.
+In the example below, the text "hello world" is analyzed using the analyzer chain attached to the `subject` field in
+the schema. The `subject` field is defined as the field type `text_general` and the text is analyzed using the
+analysis chain configured for the `text_general` field type.
 
 [source,text]
 ----
 analyze("hello world", subject)
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -63,13 +62,12 @@ responds with:
 The `analyze` function can be used inside of a `select` function to annotate documents with the tokens
 generated by the analysis.
 
-The example below is performing a `search` in collection1. Each tuple returned by the `search`
-contains an *id* and *subject*. For each tuple, the
-`select` function is selecting the *id* field and calling the `analyze` function on the *subject* field.
-The analyzer chain specified by the *subject_bigram* field is configured to perform a bigram analysis.
+The example below performs a `search` in "collection1". Each tuple returned by the `search` function
+contains an `id` and `subject`. For each tuple, the
+`select` function selects the `id` field and calls the `analyze` function on the `subject` field.
+The analyzer chain specified by the `subject_bigram` field is configured to perform a bigram analysis.
 The tokens generated by the `analyze` function are added to each tuple in a field called `terms`.
 
-Notice in the output that an array of bigram terms have been added to the tuples.
 
 [source,text]
 ----
@@ -78,8 +76,7 @@ select(search(collection1, q="*:*", fl="id, subject", sort="id asc"),
        analyze(subject, subject_bigram) as terms)
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+Notice in the output that an array of bigram terms has been added to the tuples:
 
 [source,json]
 ----
@@ -111,42 +108,37 @@ responds with:
 
 == TF-IDF Term Vectors
 
-The `termVectors` function can be used to build *TF-IDF*
-term vectors from the terms generated by the `analyze` function.
-
-The `termVectors` function operates over a list of tuples that contain a field
-called *id* and a field called *terms*. Notice
-that this is the exact output structure of the *document annotation* example above.
-
-The `termVectors` function builds a *matrix* from the list of tuples. There is *row* in the
-matrix for each tuple in the list. There is a *column* in the matrix for each term in the *terms*
-field.
-
-The example below builds on the *document annotation* example.
-The list of tuples are stored in variable *a*. The `termVectors` function
-operates over variable *a* and builds a matrix with *2 rows* and *4 columns*.
+The `termVectors` function can be used to build TF-IDF term vectors from the terms generated by the `analyze` function.
 
-The `termVectors` function also sets the *row* and *column* labels of the term vectors matrix.
-The row labels are the document ids and the
-column labels are the terms.
+The `termVectors` function operates over a list of tuples that contain a field called `id` and a field called `terms`.
+Notice that this is the exact output structure of the document annotation example above.
 
-In the example below, the `getRowLabels` and `getColumnLabels` functions return
-the row and column labels which are then stored in variables *c* and *d*.
-The *echo* parameter is echoing variables *c* and *d*, so the output includes
-the row and column labels.
+The `termVectors` function builds a matrix from the list of tuples. There is a row in the
+matrix for each tuple in the list. There is a column in the matrix for each term in the `terms` field.
 
 [source,text]
 ----
-let(echo="c, d",
-    a=select(search(collection3, q="*:*", fl="id, subject", sort="id asc"),
+let(echo="c, d", <1>
+    a=select(search(collection3, q="*:*", fl="id, subject", sort="id asc"), <2>
              id,
              analyze(subject, subject_bigram) as terms),
-    b=termVectors(a, minTermLength=4, minDocFreq=0, maxDocFreq=1),
-    c=getRowLabels(b),
+    b=termVectors(a, minTermLength=4, minDocFreq=0, maxDocFreq=1), <3>
+    c=getRowLabels(b), <4>
     d=getColumnLabels(b))
 ----
 
-When this expression is sent to the /stream handler it
+The example below builds on the document annotation example.
+
+<1> The `echo` parameter will echo variables *`c`* and *`d`*, so the output includes
+the row and column labels, which will be defined later in the expression.
+<2> The list of tuples is stored in variable *`a`*. The `termVectors` function
+operates over variable *`a`* and builds a matrix with 2 rows and 4 columns.
+<3> The `termVectors` function builds the term vectors matrix, stored in variable *`b`*, and sets its row and column labels.
+The row labels are the document ids and the column labels are the terms.
+<4> The `getRowLabels` and `getColumnLabels` functions return
+the row and column labels which are then stored in variables *`c`* and *`d`*.
+
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -188,7 +180,7 @@ let(a=select(search(collection3, q="*:*", fl="id, subject", sort="id asc"),
     b=termVectors(a, minTermLength=4, minDocFreq=0, maxDocFreq=1))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -230,8 +222,15 @@ the noisy terms helps keep the term vector matrix small enough to fit comfortabl
 
 There are four parameters designed to filter noisy terms from the term vector matrix:
 
-* *minTermLength*: The minimum term length required to include the term in the matrix.
-* *minDocFreq*: The minimum *percentage* (0 to 1) of documents the term must appear in to be included in the index.
-* *maxDocFreq*: The maximum *percentage* (0 to 1) of documents the term can appear in to be included in the index.
-* *exclude*: A comma delimited list of strings used to exclude terms. If a term contains any of the exclude strings that
+`minTermLength`::
+The minimum term length required to include the term in the matrix.
+
+`minDocFreq`::
+The minimum percentage, expressed as a number between 0 and 1, of documents the term must appear in to be included in the index.
+
+`maxDocFreq`::
+The maximum percentage, expressed as a number between 0 and 1, of documents the term can appear in to be included in the index.
+
+`exclude`::
+A comma delimited list of strings used to exclude terms. If a term contains any of the exclude strings, that
 term will be excluded from the term vector.
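
As a practical aside for readers trying these expressions, each one is sent to Solr's `/stream` handler via the `expr` parameter. Below is a minimal Java sketch using only the JDK; the host, port, and collection name "collection1" are assumptions for a local test instance, and this is an illustration rather than code from the patch:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;
    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    public class StreamExpressionDemo {
      public static void main(String[] args) throws Exception {
        String expr = "analyze(\"hello world\", subject)";
        // The /stream handler evaluates the expression passed in the expr parameter.
        String url = "http://localhost:8983/solr/collection1/stream?expr="
            + URLEncoder.encode(expr, StandardCharsets.UTF_8.name());
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(new URL(url).openStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // JSON tuples, ending with an EOF tuple
          }
        }
      }
    }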

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/time-series.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/time-series.adoc b/solr/solr-ref-guide/src/time-series.adoc
index e765270..c68ee3c 100644
--- a/solr/solr-ref-guide/src/time-series.adoc
+++ b/solr/solr-ref-guide/src/time-series.adoc
@@ -38,7 +38,7 @@ timeseries(collection1,
            count(*))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -121,7 +121,7 @@ let(a=timeseries(collection1,
     b=col(a, count(*)))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -192,7 +192,7 @@ let(a=timeseries(collection1,
     c=movingAvg(b, 3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -242,7 +242,7 @@ let(a=timeseries(collection1, q=*:*,
     c=expMovingAvg(b, 3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -292,7 +292,7 @@ let(a=timeseries(collection1,
     c=movingMedian(b, 3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -353,7 +353,7 @@ let(a=timeseries(collection1,
     c=diff(b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -403,7 +403,7 @@ let(a=array(1,2,5,2,1,2,5,2,1,2,5),
      b=diff(a, 4))
 ----
 
-Expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/variables.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/variables.adoc b/solr/solr-ref-guide/src/variables.adoc
index 99ac750..894a5ac 100644
--- a/solr/solr-ref-guide/src/variables.adoc
+++ b/solr/solr-ref-guide/src/variables.adoc
@@ -16,19 +16,17 @@
 // specific language governing permissions and limitations
 // under the License.
 
+
 == The Let Expression
 
 The `let` expression sets variables and returns
-the value of the last variable by default. The output of any streaming expression
-or math expression can be set to a variable.
+the value of the last variable by default. The output of any streaming expression or math expression can be set to a variable.
 
-Below is a simple example setting three variables *a*, *b*
-and *c*. Variables *a* and *b* are set to arrays. The variable *c* is set
+Below is a simple example setting three variables *`a`*, *`b`*
+and *`c`*. Variables *`a`* and *`b`* are set to arrays. The variable *`c`* is set
 to the output of the `ebeAdd` function which performs element-by-element
 addition of the two arrays.
 
-Notice that the last variable, *c*, is returned.
-
 [source,text]
 ----
 let(a=array(1, 2, 3),
@@ -36,8 +34,7 @@ let(a=array(1, 2, 3),
     c=ebeAdd(a, b))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+In the response, notice that the last variable, *`c`*, is returned:
 
 [source,json]
 ----
@@ -62,7 +59,7 @@ responds with:
 
 == Echoing Variables
 
-All variables can be output by setting the *echo* variable to *true*.
+All variables can be output by setting the `echo` variable to `true`.
 
 [source,text]
 ----
@@ -72,7 +69,7 @@ let(echo=true,
     c=ebeAdd(a, b))
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -106,8 +103,8 @@ responds with:
 }
 ----
 
-A specific set of variables can be echoed by providing a comma delimited
-list of variables to the echo parameter.
+A specific set of variables can be echoed by providing a comma delimited list of variables to the `echo` parameter.
+Because a list of variables has been provided, the `true` value is assumed.
 
 [source,text]
 ----
@@ -117,8 +114,7 @@ let(echo="a,b",
     c=ebeAdd(a, b))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -150,13 +146,13 @@ responds with:
 
 Variables can be cached in-memory on the Solr node where the math expression
 was run. A cached variable can then be used in future expressions. Any object
-that can be set to a variable, including data structures and mathematical models can
+that can be set to a variable, including data structures and mathematical models, can
 be cached in-memory for future use.
 
 The `putCache` function adds a variable to the cache.
 
-In the example below an array is cached in the *workspace* workspace1
-and bound to the *key* key1. The workspace allows different users to cache
+In the example below an array is cached in the `workspace` "workspace1"
+and bound to the `key` "key1". The workspace allows different users to cache
 objects in their own workspace. The `putCache` function returns
 the variable that was added to the cache.
 
@@ -168,8 +164,7 @@ let(a=array(1, 2, 3),
     d=putCache(workspace1, key1, c))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -192,20 +187,16 @@ responds with:
 }
 ----
 
-The `getCache` function retrieves an object from the
-cache by its workspace and key.
-
-In the example below the `getCache` function retrieves
-the array the was cached above and assigns it to variable *a*.
+The `getCache` function retrieves an object from the cache by its workspace and key.
 
+In the example below the `getCache` function retrieves the array that was cached above and assigns it to variable *`a`*.
 
 [source,text]
 ----
 let(a=getCache(workspace1, key1))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -228,18 +219,16 @@ responds with:
 }
 ----
 
-The `listCache` function can be used to list the workspaces or the
-keys in a specific workspace.
+The `listCache` function can be used to list the workspaces or the keys in a specific workspace.
 
-In the example below `listCache` returns all the workspaces in the cache
-as an array of strings.
+In the example below `listCache` returns all the workspaces in the cache as an array of strings.
 
 [source,text]
 ----
 let(a=listCache())
 ----
 
-When this expression is sent to the /stream handler it
+When this expression is sent to the `/stream` handler it
 responds with:
 
 [source,json]
@@ -264,14 +253,12 @@ responds with:
 
 In the example below all the keys in a specific workspace are listed:
 
-
 [source,text]
 ----
 let(a=listCache(workspace1))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -296,17 +283,14 @@ The `removeCache` function can be used to remove a key from a specific
 workspace. The `removeCache` function removes the key from the cache
 and returns the object that was removed.
 
-In the example below the array that was cached above is removed from the
-cache.
-
+In the example below the array that was cached above is removed from the cache.
 
 [source,text]
 ----
 let(a=removeCache(workspace1, key1))
 ----
 
-When this expression is sent to the /stream handler it
-responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/vector-math.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/vector-math.adoc b/solr/solr-ref-guide/src/vector-math.adoc
index 22d610f..e06008d 100644
--- a/solr/solr-ref-guide/src/vector-math.adoc
+++ b/solr/solr-ref-guide/src/vector-math.adoc
@@ -16,23 +16,20 @@
 // specific language governing permissions and limitations
 // under the License.
 
-This section of the user guide covers vector math and
-vector manipulation functions.
+This section covers vector math and vector manipulation functions.
 
 == Arrays
 
 Arrays can be created with the `array` function.
 
-For example the expression below creates a numeric array with
-three elements:
+For example, the expression below creates a numeric array with three elements:
 
 [source,text]
 ----
 array(1, 2, 3)
 ----
 
-When this expression is sent to the /stream handler it responds with
-a json array.
+When this expression is sent to the `/stream` handler it responds with a JSON array:
 
 [source,json]
 ----
@@ -66,7 +63,7 @@ For example, an array can be reversed with the `rev` function:
 rev(array(1, 2, 3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -89,15 +86,14 @@ When this expression is sent to the /stream handler it responds with:
 }
 ----
 
-Another example is the `length` function,
-which returns the length of an array:
+Another example is the `length` function, which returns the length of an array:
 
 [source,text]
 ----
 length(array(1, 2, 3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -124,7 +120,7 @@ copies elements of an array from a start and end range.
 copyOfRange(array(1,2,3,4,5,6), 1, 4)
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -149,21 +145,18 @@ When this expression is sent to the /stream handler it responds with:
 
 == Vector Summarizations and Norms
 
-There are a set of functions that perform
-summerizations and return norms of arrays. These functions
-operate over an array and return a single
-value. The following vector summarizations and norm functions are available:
+There is a set of functions that perform summarizations and return norms of arrays. These functions
+operate over an array and return a single value. The following vector summarizations and norm functions are available:
 `mult`, `add`, `sumSq`, `mean`, `l1norm`, `l2norm`, `linfnorm`.
 
-The example below is using the `mult` function,
-which multiples all the values of an array.
+The example below shows the `mult` function, which multiplies all the values of an array.
 
 [source,text]
 ----
 mult(array(2,4,8))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -184,14 +177,14 @@ When this expression is sent to the /stream handler it responds with:
 
 The vector norm functions provide different formulas for calculating vector magnitude.
 
-The example below calculates the *l2norm* of an array.
+The example below calculates the `l2norm` of an array.
 
 [source,text]
 ----
 l2norm(array(2,4,8))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -212,12 +205,11 @@ When this expression is sent to the /stream handler it responds with:
 
 == Scalar Vector Math
 
-Scalar vector math functions add, subtract, multiple or divide a scalar value with every value in a vector.
+Scalar vector math functions add, subtract, multiply or divide a scalar value with every value in a vector.
 The following functions perform these operations: `scalarAdd`, `scalarSubtract`, `scalarMultiply`
 and `scalarDivide`.
 
-
-Below is an example of the `scalarMultiply` function, which multiplies the scalar value 3 with
+Below is an example of the `scalarMultiply` function, which multiplies the scalar value `3` with
 every value of an array.
 
 [source,text]
@@ -225,7 +217,7 @@ every value of an array.
 scalarMultiply(3, array(1,2,3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -251,7 +243,7 @@ When this expression is sent to the /stream handler it responds with:
 == Element-By-Element Vector Math
 
 Two vectors can be added, subtracted, multiplied and divided using element-by-element
-vector math functions. The following element-by-element vector math functions are:
+vector math functions. The available element-by-element vector math functions are:
 `ebeAdd`, `ebeSubtract`, `ebeMultiply`, `ebeDivide`.
 
 The expression below performs the element-by-element subtraction of two arrays.
@@ -261,7 +253,7 @@ The expression below performs the element-by-element subtraction of two arrays.
 ebeSubtract(array(10, 15, 20), array(1,2,3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -297,7 +289,7 @@ Below is an example of the `dotProduct` function:
 dotProduct(array(2,3,0,0,0,1), array(2,0,1,0,0,3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -323,7 +315,7 @@ Below is an example of the `cosineSimilarity` function:
 cosineSimilarity(array(2,3,0,0,0,1), array(2,0,1,0,0,3))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -340,4 +332,4 @@ When this expression is sent to the /stream handler it responds with:
     ]
   }
 }
-----
\ No newline at end of file
+----
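
As a quick check on the arithmetic in the `dotProduct` and `cosineSimilarity` examples above, with a = (2,3,0,0,0,1) and b = (2,0,1,0,0,3):

    dotProduct(a, b)       = 2*2 + 3*0 + 0*1 + 0*0 + 0*0 + 1*3 = 7
    l2norm(a)              = sqrt(2^2 + 3^2 + 1^2) = sqrt(14)
    l2norm(b)              = sqrt(2^2 + 1^2 + 3^2) = sqrt(14)
    cosineSimilarity(a, b) = 7 / (sqrt(14) * sqrt(14)) = 7 / 14 = 0.5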

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a619038e/solr/solr-ref-guide/src/vectorization.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/vectorization.adoc b/solr/solr-ref-guide/src/vectorization.adoc
index 2770320..5fdfadc 100644
--- a/solr/solr-ref-guide/src/vectorization.adoc
+++ b/solr/solr-ref-guide/src/vectorization.adoc
@@ -18,11 +18,10 @@
 
 This section of the user guide explores techniques
 for retrieving streams of data from Solr and vectorizing the
-*numeric* fields.
+numeric fields.
 
-The next chapter of the user guide covers
-Text Analysis and Term Vectors which describes how to
-vectorize *text* fields.
+See the section <<term-vectors.adoc#term-vectors,Text Analysis and Term Vectors>>, which describes how to
+vectorize text fields.
 
 == Streams
 
@@ -32,42 +31,42 @@ to vectorize and analyze the results sets.
 
 Below are some of the key stream sources:
 
-* *random*: Random sampling is widely used in statistics, probability and machine learning.
+* *`random`*: Random sampling is widely used in statistics, probability and machine learning.
 The `random` function returns a random sample of search results that match a
 query. The random samples can be vectorized and operated on by math expressions and the results
 can be used to describe and make inferences about the entire population.
 
-* *timeseries*: The `timeseries`
+* *`timeseries`*: The `timeseries`
 expression provides fast distributed time series aggregations, which can be
 vectorized and analyzed with math expressions.
 
-* *knnSearch*: K-nearest neighbor is a core machine learning algorithm. The `knnSearch`
+* *`knnSearch`*: K-nearest neighbor is a core machine learning algorithm. The `knnSearch`
 function is a specialized knn algorithm optimized to find the k-nearest neighbors of a document in
 a distributed index. Once the nearest neighbors are retrieved they can be vectorized
 and operated on by machine learning and text mining algorithms.
 
-* *sql*: SQL is the primary query language used by data scientists. The `sql` function supports
+* *`sql`*: SQL is the primary query language used by data scientists. The `sql` function supports
 data retrieval using a subset of SQL which includes both full text search and
 fast distributed aggregations. The result sets can then be vectorized and operated
 on by math expressions.
 
-* *jdbc*: The `jdbc` function allows data from any JDBC compliant data source to be combined with
+* *`jdbc`*: The `jdbc` function allows data from any JDBC compliant data source to be combined with
 streams originating from Solr. Result sets from outside data sources can be vectorized and operated
 on by math expressions in the same manner as result sets originating from Solr.
 
-* *topic*: Messaging is an important foundational technology for large scale computing. The `topic`
+* *`topic`*: Messaging is an important foundational technology for large scale computing. The `topic`
 function provides publish/subscribe messaging capabilities by treating
 Solr Cloud as a distributed message queue. Topics are extremely powerful
 because they allow subscription by query. Topics can be used to support a broad set of
 use cases including bulk text mining operations and AI alerting.
 
-* *nodes*: Graph queries are frequently used by recommendation engines and are an important
+* *`nodes`*: Graph queries are frequently used by recommendation engines and are an important
 machine learning tool. The `nodes` function provides fast, distributed, breadth
 first graph traversal over documents in a Solr Cloud collection. The node sets collected
 by the `nodes` function can be operated on by statistical and machine learning expressions to
 gain more insight into the graph.
 
-* *search*: Ranked search results are a powerful tool for finding the most relevant
+* *`search`*: Ranked search results are a powerful tool for finding the most relevant
 documents from a large document corpus. The `search` expression
 returns the top N ranked search results that match any
 Solr query, including geo-spatial queries. The smaller set of relevant
@@ -79,7 +78,7 @@ text mining expressions to gather insights about the data set.
 The output of any streaming expression can be set to a variable.
 Below is a very simple example using the `random` function to fetch
 three random samples from collection1. The random samples are returned
-as *tuples*, which contain name/value pairs.
+as tuples, which contain name/value pairs.
 
 
 [source,text]
@@ -87,7 +86,7 @@ as *tuples*, which contain name/value pairs.
 let(a=random(collection1, q="*:*", rows="3", fl="price_f"))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -116,10 +115,10 @@ When this expression is sent to the /stream handler it responds with:
 }
 ----
 
-== Creating a Vector with the *col* Function
+== Creating a Vector with the col Function
 
 The `col` function iterates over a list of tuples and copies the values
-from a specific column into an *array*.
+from a specific column into an array.
 
 The output of the `col` function is a numeric array that can be set to a
 variable and operated on by math expressions.
@@ -157,7 +156,7 @@ let(a=random(collection1, q="*:*", rows="3", fl="price_f"),
 
 Once a vector has been created any math expression that operates on vectors
 can be applied. In the example below the `mean` function is applied to
-the vector assigned to variable *b*.
+the vector assigned to variable *`b`*.
 
 [source,text]
 ----
@@ -166,7 +165,7 @@ let(a=random(collection1, q="*:*", rows="15000", fl="price_f"),
     c=mean(b))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -191,13 +190,14 @@ Matrices can be created by vectorizing multiple numeric fields
 and adding them to a matrix. The matrices can then be operated on by
 any math expression that operates on matrices.
 
+[TIP]
+====
 Note that this section deals with the creation of matrices
-from numeric data. The next chapter of the user guide covers
-Text Analysis and Term Vectors which describes how to build TF-IDF
-term vector matrices from text fields.
+from numeric data. The section <<term-vectors.adoc#term-vectors,Text Analysis and Term Vectors>> describes how to build TF-IDF term vector matrices from text fields.
+====
 
 Below is a simple example where four random samples are taken
-from different sub-populations in the data. The *price_f* field of
+from different sub-populations in the data. The `price_f` field of
 each random sample is
 vectorized and the vectors are added as rows to a matrix.
 Then the `sumRows`
@@ -218,7 +218,7 @@ let(a=random(collection1, q="market:A", rows="5000", fl="price_f"),
     j=sumRows(i))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -244,14 +244,14 @@ When this expression is sent to the /stream handler it responds with:
 
 == Latitude / Longitude Vectors
 
-The `latlonVectors` function wraps a list of tuples and parses a lat/long location field into
+The `latlonVectors` function wraps a list of tuples and parses a lat/lon location field into
 a matrix of lat/long vectors. Each row in the matrix is a vector that contains the lat/long
 pair for the corresponding tuple in the list. The row labels for the matrix are
-automatically set to the *id* field in the tuples. The the lat/lon matrix can then be operated
-on by distance based machine learning functions using the `haversineMeters` distance measure.
+automatically set to the `id` field in the tuples. The lat/lon matrix can then be operated
+on by distance-based machine learning functions using the `haversineMeters` distance measure.
 
 The `latlonVectors` function takes two parameters: a list of tuples and a named parameter called
-*field*. The field parameter tells the `latlonVectors` function which field to parse the lat/lon
+`field`, which tells the `latlonVectors` function which field to parse the lat/lon
 vectors from.
 
 Below is an example of the `latlonVectors` function.
@@ -262,7 +262,7 @@ let(a=random(collection1, q="*:*", fl="id, loc_p", rows="5"),
     b=latlonVectors(a, field="loc_p"))
 ----
 
-When this expression is sent to the /stream handler it responds with:
+When this expression is sent to the `/stream` handler it responds with:
 
 [source,json]
 ----
@@ -301,5 +301,3 @@ When this expression is sent to the /stream handler it responds with:
   }
 }
 ----
-
-


[16/43] lucene-solr:jira/http2: SOLR-11943: Fix RefGuide for latlonVectors and haversineMeters functions.

Posted by da...@apache.org.
SOLR-11943: Fix RefGuide for latlonVectors and haversineMeters functions.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e5232f68
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e5232f68
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e5232f68

Branch: refs/heads/jira/http2
Commit: e5232f68cf73e7248ea55ef98367d5f9f5c40c23
Parents: 304836e
Author: Joel Bernstein <jb...@apache.org>
Authored: Sun Sep 9 20:53:54 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Sun Sep 9 20:53:54 2018 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/vectorization.adoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e5232f68/solr/solr-ref-guide/src/vectorization.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/vectorization.adoc b/solr/solr-ref-guide/src/vectorization.adoc
index 09b6a01..2770320 100644
--- a/solr/solr-ref-guide/src/vectorization.adoc
+++ b/solr/solr-ref-guide/src/vectorization.adoc
@@ -246,9 +246,9 @@ When this expression is sent to the /stream handler it responds with:
 
 The `latlonVectors` function wraps a list of tuples and parses a lat/long location field into
 a matrix of lat/long vectors. Each row in the matrix is a vector that contains the lat/long
-pair for the corresponding tuple in the list. The column labels for the matrix are
+pair for the corresponding tuple in the list. The row labels for the matrix are
 automatically set to the *id* field in the tuples. The the lat/lon matrix can then be operated
-on by machine learning functions using the `haversineMeters` distance measure.
+on by distance based machine learning functions using the `haversineMeters` distance measure.
 
 The `latlonVectors` function takes two parameters: a list of tuples and a named parameter called
 *field*. The field parameter tells the `latlonVectors` function which field to parse the lat/lon


[31/43] lucene-solr:jira/http2: LUCENE-8459: add SearcherTaxonomyManager constructor taking already opened readers

Posted by da...@apache.org.
LUCENE-8459: add SearcherTaxonomyManager constructor taking already opened readers


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a9551404
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a9551404
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a9551404

Branch: refs/heads/jira/http2
Commit: a9551404fd057c83abe3b47bbf9124d335790cbe
Parents: 89bc082
Author: Mike McCandless <mi...@apache.org>
Authored: Tue Sep 11 14:57:43 2018 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Tue Sep 11 14:57:43 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                                    |  4 ++++
 .../facet/taxonomy/SearcherTaxonomyManager.java       | 14 ++++++++++++++
 .../facet/taxonomy/TestSearcherTaxonomyManager.java   | 14 ++++++++++++++
 3 files changed, 32 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a9551404/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index a9b97a7..5035625 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -219,6 +219,10 @@ API Changes:
 * LUCENE-8422: Static helper functions for Matches and MatchesIterator implementations
   have been moved from Matches to MatchesUtils (Alan Woodward)
 
+* LUCENE-8459: SearcherTaxonomyManager now has a constructor taking already opened
+  IndexReaders, allowing the caller to pass a FilterDirectoryReader, for example.
+  (Mike McCandless)
+
 Bug Fixes:
 
 * LUCENE-8445: Tighten condition when two planes are identical to prevent constructing

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a9551404/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/SearcherTaxonomyManager.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/SearcherTaxonomyManager.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/SearcherTaxonomyManager.java
index e618aa5..82fd2dd 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/SearcherTaxonomyManager.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/SearcherTaxonomyManager.java
@@ -98,6 +98,20 @@ public class SearcherTaxonomyManager extends ReferenceManager<SearcherTaxonomyMa
     taxoEpoch = -1;
   }
 
+  /**
+   * Creates this from already opened {@link IndexReader} and {@link DirectoryTaxonomyReader} instances.  Note that
+   * the incoming readers will be closed when you call {@link #close}.
+   */
+  public SearcherTaxonomyManager(IndexReader reader, DirectoryTaxonomyReader taxoReader, SearcherFactory searcherFactory) throws IOException {
+    if (searcherFactory == null) {
+      searcherFactory = new SearcherFactory();
+    }
+    this.searcherFactory = searcherFactory;
+    current = new SearcherAndTaxonomy(SearcherManager.getSearcher(searcherFactory, reader, null), taxoReader);
+    this.taxoWriter = null;
+    taxoEpoch = -1;
+  }
+
   @Override
   protected void decRef(SearcherAndTaxonomy ref) throws IOException {
     ref.searcher.getIndexReader().decRef();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a9551404/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
index d9a3f8e..e8c764e 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
@@ -32,14 +32,18 @@ import org.apache.lucene.facet.Facets;
 import org.apache.lucene.facet.FacetsCollector;
 import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager.SearcherAndTaxonomy;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexNotFoundException;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.index.TieredMergePolicy;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.ReferenceManager;
+import org.apache.lucene.search.SearcherFactory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.TestUtil;
@@ -347,4 +351,14 @@ public class TestSearcherTaxonomyManager extends FacetTestCase {
     expectThrows(IndexNotFoundException.class, mgr::maybeRefreshBlocking);
     IOUtils.close(w, tw, mgr, indexDir, taxoDir);
   }
+
+  private SearcherTaxonomyManager getSearcherTaxonomyManager(Directory indexDir, Directory taxoDir, SearcherFactory searcherFactory) throws IOException {
+    if (random().nextBoolean()) {
+      return new SearcherTaxonomyManager(indexDir, taxoDir, searcherFactory);
+    } else {
+      IndexReader reader = DirectoryReader.open(indexDir);
+      DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
+      return new SearcherTaxonomyManager(reader, taxoReader, searcherFactory);
+    }
+  }
 }


[42/43] lucene-solr:jira/http2: Fix Ma[n]datory typo in stream-decorator-reference.adoc file.

Posted by da...@apache.org.
Fix Ma[n]datory typo in stream-decorator-reference.adoc file.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d6641ffb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d6641ffb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d6641ffb

Branch: refs/heads/jira/http2
Commit: d6641ffb1d5fd5c9d3743561e2c5158951317713
Parents: 9f37a6b
Author: Christine Poerschke <cp...@apache.org>
Authored: Thu Sep 13 21:26:19 2018 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Thu Sep 13 21:26:19 2018 +0100

----------------------------------------------------------------------
 solr/solr-ref-guide/src/stream-decorator-reference.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d6641ffb/solr/solr-ref-guide/src/stream-decorator-reference.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/stream-decorator-reference.adoc b/solr/solr-ref-guide/src/stream-decorator-reference.adoc
index 08f1e7a..c2d6160 100644
--- a/solr/solr-ref-guide/src/stream-decorator-reference.adoc
+++ b/solr/solr-ref-guide/src/stream-decorator-reference.adoc
@@ -675,7 +675,7 @@ The `having` expression wraps a stream and applies a boolean operation to each t
 === having Parameters
 
 * `StreamExpression`: (Mandatory) The stream source for the having function.
-* `booleanEvaluator`: (Madatory) The following boolean operations are supported: `eq` (equals), `gt` (greater than), `lt` (less than), `gteq` (greater than or equal to), `lteq` (less than or equal to), `and`, `or`, `eor` (exclusive or), and `not`. Boolean evaluators can be nested with other evaluators to form complex boolean logic.
+* `booleanEvaluator`: (Mandatory) The following boolean operations are supported: `eq` (equals), `gt` (greater than), `lt` (less than), `gteq` (greater than or equal to), `lteq` (less than or equal to), `and`, `or`, `eor` (exclusive or), and `not`. Boolean evaluators can be nested with other evaluators to form complex boolean logic.
 
 The comparison evaluators compare the value in a specific field with a value, whether a string, number, or boolean. For example: `eq(field1, 10)`, returns `true` if `field1` is equal to 10.
 


[40/43] lucene-solr:jira/http2: SOLR-12759: assertFalse -> assumeFalse

Posted by da...@apache.org.
SOLR-12759: assertFalse -> assumeFalse


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/dea3d69f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/dea3d69f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/dea3d69f

Branch: refs/heads/jira/http2
Commit: dea3d69f95b5521aea5a04a82f9d8536284ed741
Parents: 4a5b914
Author: David Smiley <ds...@apache.org>
Authored: Thu Sep 13 09:43:25 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Thu Sep 13 09:43:25 2018 -0400

----------------------------------------------------------------------
 .../solr/handler/extraction/ExtractingRequestHandlerTest.java      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dea3d69f/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
index 5a76a0b..852b7ce 100644
--- a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
+++ b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
@@ -41,7 +41,7 @@ public class ExtractingRequestHandlerTest extends SolrTestCaseJ4 {
 
   @BeforeClass
   public static void beforeClass() throws Exception {
-    assertFalse("SOLR-12759 JDK 11 (1st release) and Tika 1.x can result in extracting dates in a bad format.",
+    assumeFalse("SOLR-12759 JDK 11 (1st release) and Tika 1.x can result in extracting dates in a bad format.",
         System.getProperty("java.version").startsWith("11"));
     initCore("solrconfig.xml", "schema.xml", getFile("extraction/solr").getAbsolutePath());
   }
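
As an aside, a minimal JUnit 4 sketch of the difference this one-word change
makes (the class below is illustrative, not part of the patch): assertFalse
reports a test failure when the condition holds, while assumeFalse marks the
test as skipped.

    import static org.junit.Assume.assumeFalse;

    public class Jdk11GuardSketch {
      // With assertFalse, running on JDK 11 would fail the suite during
      // setup; with assumeFalse, the tests are skipped on JDK 11 instead.
      public static void guardAgainstJdk11() {
        assumeFalse("SOLR-12759: JDK 11 and Tika 1.x extract dates in a bad format.",
            System.getProperty("java.version").startsWith("11"));
      }
    }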


[12/43] lucene-solr:jira/http2: SOLR-11943: Change scope of commons-math3 solr-core dependency from test to compile, for HaversineMetersEvaluator.java

Posted by da...@apache.org.
SOLR-11943: Change scope of commons-math3 solr-core dependency from test to compile, for HaversineMetersEvaluator.java


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8f498920
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8f498920
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8f498920

Branch: refs/heads/jira/http2
Commit: 8f498920bd4f2d0059031251409c812bab55404d
Parents: f5ce384
Author: Steve Rowe <sa...@apache.org>
Authored: Fri Sep 7 16:30:22 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Fri Sep 7 16:30:22 2018 -0400

----------------------------------------------------------------------
 solr/core/ivy.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8f498920/solr/core/ivy.xml
----------------------------------------------------------------------
diff --git a/solr/core/ivy.xml b/solr/core/ivy.xml
index 5a0fd09..80aab20 100644
--- a/solr/core/ivy.xml
+++ b/solr/core/ivy.xml
@@ -39,7 +39,7 @@
     <dependency org="com.google.guava" name="guava" rev="${/com.google.guava/guava}" conf="compile"/>
     <dependency org="org.locationtech.spatial4j" name="spatial4j" rev="${/org.locationtech.spatial4j/spatial4j}" conf="compile"/>
     <dependency org="org.antlr" name="antlr4-runtime" rev="${/org.antlr/antlr4-runtime}"/>
-    <dependency org="org.apache.commons" name="commons-math3" rev="${/org.apache.commons/commons-math3}" conf="test"/>
+    <dependency org="org.apache.commons" name="commons-math3" rev="${/org.apache.commons/commons-math3}" conf="compile"/>
     <dependency org="org.ow2.asm" name="asm" rev="${/org.ow2.asm/asm}" conf="compile"/>
     <dependency org="org.ow2.asm" name="asm-commons" rev="${/org.ow2.asm/asm-commons}" conf="compile"/>
     <dependency org="org.restlet.jee" name="org.restlet" rev="${/org.restlet.jee/org.restlet}" conf="compile"/>


[43/43] lucene-solr:jira/http2: Merge with master

Posted by da...@apache.org.
Merge with master


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9c65fe4f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9c65fe4f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9c65fe4f

Branch: refs/heads/jira/http2
Commit: 9c65fe4fe338721b1dd342210cd4b12eee6efb31
Parents: 802f367 d6641ff
Author: Cao Manh Dat <da...@apache.org>
Authored: Fri Sep 14 10:30:03 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Fri Sep 14 10:30:03 2018 +0700

----------------------------------------------------------------------
 dev-tools/doap/solr.rdf                         |   2 +-
 dev-tools/scripts/buildAndPushRelease.py        |  35 +--
 dev-tools/scripts/smokeTestRelease.py           |  36 +--
 lucene/CHANGES.txt                              |  23 ++
 lucene/MIGRATE.txt                              |  10 +
 lucene/build.xml                                |  15 +-
 lucene/common-build.xml                         |  26 --
 .../java/org/apache/lucene/index/Sorter.java    |  27 --
 .../lucene/search/Boolean2ScorerSupplier.java   |   4 +-
 .../org/apache/lucene/search/BooleanScorer.java |  14 +-
 .../org/apache/lucene/search/BooleanWeight.java |   2 +-
 .../apache/lucene/search/CachingCollector.java  |  16 +-
 .../org/apache/lucene/search/FakeScorer.java    |  62 -----
 .../apache/lucene/search/MatchAllDocsQuery.java |   2 +-
 .../org/apache/lucene/search/ScoreAndDoc.java   |  35 +++
 .../java/org/apache/lucene/search/Scorer.java   |   7 +-
 .../org/apache/lucene/search/SortRescorer.java  |  10 +-
 .../java/org/apache/lucene/util/Version.java    |   7 +
 .../org/apache/lucene/util/bkd/BKDReader.java   |  58 ++++-
 .../org/apache/lucene/util/bkd/BKDWriter.java   |  56 ++++-
 .../lucene/search/MultiCollectorTest.java       |  20 +-
 .../search/TestBoolean2ScorerSupplier.java      |  80 ++++--
 .../org/apache/lucene/search/TestBooleanOr.java |   2 +-
 .../apache/lucene/search/TestBooleanScorer.java |   2 +-
 .../lucene/search/TestCachingCollector.java     |  23 +-
 .../lucene/search/TestConjunctionDISI.java      |  50 +++-
 .../lucene/search/TestConstantScoreQuery.java   |   4 +-
 .../search/TestMaxScoreSumPropagator.java       |  34 ++-
 .../lucene/search/TestMultiCollector.java       |  16 +-
 .../apache/lucene/search/TestQueryRescorer.java |   2 +-
 .../lucene/search/TestTopDocsCollector.java     |  22 +-
 .../lucene/search/TestTopFieldCollector.java    |   2 +-
 .../apache/lucene/expressions/FakeScorer.java   |  53 ----
 .../lucene/facet/DrillSidewaysScorer.java       |  24 +-
 .../facet/taxonomy/SearcherTaxonomyManager.java |  14 ++
 .../taxonomy/TestSearcherTaxonomyManager.java   |  14 ++
 .../search/grouping/BlockGroupingCollector.java |  19 +-
 .../lucene/search/grouping/FakeScorer.java      |  52 ----
 .../apache/lucene/search/join/FakeScorer.java   |  52 ----
 .../queries/function/FunctionRangeQuery.java    |   4 +-
 .../lucene/queries/function/FunctionValues.java |  15 +-
 .../lucene/queries/function/ValueSource.java    |  26 +-
 .../queries/function/ValueSourceScorer.java     |   8 +-
 .../docvalues/DocTermsIndexDocValues.java       |   5 +-
 .../function/docvalues/DoubleDocValues.java     |  11 +-
 .../function/docvalues/IntDocValues.java        |   5 +-
 .../function/docvalues/LongDocValues.java       |   5 +-
 .../function/valuesource/EnumFieldSource.java   |   5 +-
 .../document/FloatPointNearestNeighbor.java     |   2 +-
 .../apache/lucene/search/NearestNeighbor.java   |   2 +-
 .../Geo3dShapeWGS84ModelRectRelationTest.java   |   2 +-
 .../analyzing/BlendedInfixSuggester.java        |   7 +-
 .../analyzing/BlendedInfixSuggesterTest.java    |  73 ++++--
 solr/CHANGES.txt                                |  80 +++---
 solr/build.xml                                  |  12 +-
 .../ExtractingRequestHandlerTest.java           |   5 +-
 solr/core/ivy.xml                               |   2 +-
 .../cloud/autoscaling/AutoScalingHandler.java   |  13 +-
 .../solr/handler/component/QueryComponent.java  |  31 +--
 .../solr/search/CollapsingQParserPlugin.java    |  29 +--
 .../src/java/org/apache/solr/search/Filter.java |   2 +-
 .../apache/solr/search/FunctionRangeQuery.java  |  12 +-
 .../search/function/ValueSourceRangeFilter.java |   9 +-
 .../apache/solr/update/SolrCmdDistributor.java  |   5 +-
 solr/core/src/test-files/log4j2.xml             |  14 +-
 .../apache/solr/handler/RequestLoggingTest.java |  27 +-
 .../org/apache/solr/logging/TestLogWatcher.java |  91 +++----
 .../apache/solr/search/TestRankQueryPlugin.java |  32 +--
 .../solr/store/hdfs/HdfsDirectoryTest.java      |   7 +-
 solr/server/resources/log4j2-console.xml        |   8 +-
 solr/server/resources/log4j2.xml                |  30 +--
 solr/solr-ref-guide/build.xml                   |  18 +-
 .../src/common-query-parameters.adoc            |   4 +-
 solr/solr-ref-guide/src/css/ref-guide.css       |   5 +
 solr/solr-ref-guide/src/curve-fitting.adoc      |  26 +-
 solr/solr-ref-guide/src/dsp.adoc                |  66 ++---
 .../src/indexconfig-in-solrconfig.adoc          |  14 +-
 .../src/initparams-in-solrconfig.adoc           |   2 +-
 solr/solr-ref-guide/src/machine-learning.adoc   | 238 ++++++++----------
 solr/solr-ref-guide/src/math-expressions.adoc   |   2 +-
 solr/solr-ref-guide/src/matrix-math.adoc        |  20 +-
 solr/solr-ref-guide/src/numerical-analysis.adoc | 114 ++++-----
 solr/solr-ref-guide/src/other-parsers.adoc      |   6 +-
 .../src/other-schema-elements.adoc              |   2 +-
 .../src/pagination-of-results.adoc              |   2 +-
 .../src/probability-distributions.adoc          | 100 ++++----
 solr/solr-ref-guide/src/regression.adoc         |  77 +++---
 .../src/resource-and-plugin-loading.adoc        |  24 +-
 .../src/rule-based-replica-placement.adoc       |   2 +-
 solr/solr-ref-guide/src/scalar-math.adoc        |   4 +-
 solr/solr-ref-guide/src/simulations.adoc        | 125 +++++----
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc |  73 +++++-
 solr/solr-ref-guide/src/spell-checking.adoc     |   2 +-
 solr/solr-ref-guide/src/statistics.adoc         |  22 +-
 .../src/stream-decorator-reference.adoc         |   2 +-
 .../src/stream-evaluator-reference.adoc         |   4 +-
 .../src/stream-source-reference.adoc            |   6 +-
 solr/solr-ref-guide/src/suggester.adoc          |   8 +-
 solr/solr-ref-guide/src/term-vectors.adoc       |  91 ++++---
 solr/solr-ref-guide/src/time-series.adoc        |  14 +-
 .../transforming-and-indexing-custom-json.adoc  |   2 +-
 .../src/uploading-data-with-index-handlers.adoc |  83 +++---
 solr/solr-ref-guide/src/variables.adoc          |  62 ++---
 solr/solr-ref-guide/src/vector-math.adoc        |  50 ++--
 solr/solr-ref-guide/src/vectorization.adoc      | 108 ++++++--
 .../client/solrj/cloud/autoscaling/Clause.java  | 252 ++++++++++++-------
 .../solrj/cloud/autoscaling/Condition.java      |  18 +-
 .../solrj/cloud/autoscaling/CoresVariable.java  |   5 +-
 .../cloud/autoscaling/FreeDiskVariable.java     |  40 ++-
 .../client/solrj/cloud/autoscaling/Operand.java |   1 +
 .../client/solrj/cloud/autoscaling/Policy.java  |   4 +-
 .../solrj/cloud/autoscaling/ReplicaCount.java   |  48 +++-
 .../client/solrj/cloud/autoscaling/Row.java     |  35 ++-
 .../solrj/cloud/autoscaling/Suggestion.java     |  43 +++-
 .../solrj/cloud/autoscaling/Variable.java       |  18 +-
 .../solrj/cloud/autoscaling/VariableBase.java   |  16 +-
 .../solrj/cloud/autoscaling/Violation.java      |  31 +--
 .../autoscaling/WithCollectionVariable.java     |   3 +-
 .../org/apache/solr/client/solrj/io/Lang.java   |   2 +-
 .../solrj/io/eval/LatLonVectorsEvaluator.java   | 115 +++++++++
 .../solrj/io/eval/LocationVectorsEvaluator.java | 105 --------
 .../client/solrj/io/stream/FacetStream.java     |   7 +-
 .../solrj/cloud/autoscaling/TestPolicy.java     |  25 +-
 .../solrj/cloud/autoscaling/TestPolicy2.java    | 193 +++++++++++++-
 .../apache/solr/client/solrj/io/TestLang.java   |   2 +-
 .../solrj/io/stream/MathExpressionTest.java     |   4 +-
 .../client/solrj/io/stream/StreamingTest.java   |   9 +
 .../apache/solr/cloud/SolrCloudTestCase.java    |  40 ++-
 128 files changed, 2143 insertions(+), 1762 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c65fe4f/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
index 710f03f,d7388f0..983617f
--- a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
@@@ -46,27 -56,140 +46,28 @@@ import org.slf4j.LoggerFactory
  /**
   * Used for distributing commands from a shard leader to its replicas.
   */
 -public class SolrCmdDistributor implements Closeable {
 +public class SolrCmdDistributor {
    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 -  
 -  private StreamingSolrClients clients;
 -  private boolean finished = false; // see finish()
  
 -  private int retryPause = 500;
 -  
    private final List<Error> allErrors = new ArrayList<>();
 -  private final List<Error> errors = Collections.synchronizedList(new ArrayList<Error>());
 -  
 -  private final CompletionService<Object> completionService;
 -  private final Set<Future<Object>> pending = new HashSet<>();
 -  
 -  public static interface AbortCheck {
 -    public boolean abortCheck();
 -  }
 -  
 +  private Http2SolrClient client;
 +  private Phaser pendingTasksPhaser = new Phaser(1);
 +  private int retryPause = 500;
 +
    public SolrCmdDistributor(UpdateShardHandler updateShardHandler) {
 -    this.clients = new StreamingSolrClients(updateShardHandler);
 -    this.completionService = new ExecutorCompletionService<>(updateShardHandler.getUpdateExecutor());
 +    this.client = updateShardHandler.getUpdateOnlyHttpClient();
    }
- 
-   public SolrCmdDistributor(Http2SolrClient client, int retryPause) {
+   
+   /* For tests only */
 -  SolrCmdDistributor(StreamingSolrClients clients, int retryPause) {
 -    this.clients = clients;
++  SolrCmdDistributor(Http2SolrClient client, int retryPause) {
 +    this.client = client;
      this.retryPause = retryPause;
 -    completionService = new ExecutorCompletionService<>(clients.getUpdateExecutor());
    }
    
 -  public void finish() {    
 -    try {
 -      assert ! finished : "lifecycle sanity check";
 -      finished = true;
 -      
 -      blockAndDoRetries();
 -    } finally {
 -      clients.shutdown();
 -    }
 -  }
 -  
 -  public void close() {
 -    clients.shutdown();
 +  public void finish() {
 +    blockUntilFinished();
    }
  
 -  private void doRetriesIfNeeded() {
 -    // NOTE: retries will be forwards to a single url
 -    
 -    List<Error> errors = new ArrayList<>(this.errors);
 -    errors.addAll(clients.getErrors());
 -    List<Error> resubmitList = new ArrayList<>();
 -    
 -    if (log.isInfoEnabled() && errors.size() > 0) {
 -      log.info("SolrCmdDistributor found {} errors", errors.size());
 -    }
 -    
 -    if (log.isDebugEnabled() && errors.size() > 0) {
 -      StringBuilder builder = new StringBuilder("SolrCmdDistributor found:");
 -      int maxErrorsToShow = 10;
 -      for (Error e:errors) {
 -        if (maxErrorsToShow-- <= 0) break;
 -        builder.append("\n" + e);
 -      }
 -      if (errors.size() > 10) {
 -        builder.append("\n... and ");
 -        builder.append(errors.size() - 10);
 -        builder.append(" more");
 -      }
 -      log.debug(builder.toString());
 -    }
 -
 -    for (Error err : errors) {
 -      try {
 -        String oldNodeUrl = err.req.node.getUrl();
 -        
 -        /*
 -         * if this is a retryable request we may want to retry, depending on the error we received and
 -         * the number of times we have already retried
 -         */
 -        boolean isRetry = err.req.shouldRetry(err);
 -        
 -        if (testing_errorHook != null) Diagnostics.call(testing_errorHook,
 -            err.e);
 -        
 -        // this can happen in certain situations such as close
 -        if (isRetry) {
 -          err.req.retries++;
 -
 -          if (err.req.node instanceof ForwardNode) {
 -            SolrException.log(SolrCmdDistributor.log, "forwarding update to "
 -                + oldNodeUrl + " failed - retrying ... retries: "
 -                + err.req.retries + "/" + err.req.node.getMaxRetries() + ". "
 -                + err.req.cmd.toString() + " params:"
 -                + err.req.uReq.getParams() + " rsp:" + err.statusCode, err.e);
 -          } else {
 -            SolrException.log(SolrCmdDistributor.log, "FROMLEADER request to "
 -                + oldNodeUrl + " failed - retrying ... retries: "
 -                + err.req.retries + "/" + err.req.node.getMaxRetries() + ". "
 -                + err.req.cmd.toString() + " params:"
 -                + err.req.uReq.getParams() + " rsp:" + err.statusCode, err.e);
 -          }
 -          resubmitList.add(err);
 -        } else {
 -          allErrors.add(err);
 -        }
 -      } catch (Exception e) {
 -        // continue on
 -        log.error("Unexpected Error while doing request retries", e);
 -      }
 -    }
 -    
 -    if (resubmitList.size() > 0) {
 -      // Only backoff once for the full batch
 -      try {
 -        int backoffTime = Math.min(retryPause * resubmitList.get(0).req.retries, 2000);
 -        log.debug("Sleeping {}ms before re-submitting {} requests", backoffTime, resubmitList.size());
 -        Thread.sleep(backoffTime);
 -      } catch (InterruptedException e) {
 -        Thread.currentThread().interrupt();
 -        log.warn(null, e);
 -      }
 -    }
 -    
 -    clients.clearErrors();
 -    this.errors.clear();
 -    for (Error err : resubmitList) {
 -      submit(err.req, false);
 -    }
 -    
 -    if (resubmitList.size() > 0) {
 -      blockAndDoRetries();
 -    }
 -  }
 -  
    public void distribDelete(DeleteUpdateCommand cmd, List<Node> nodes, ModifiableSolrParams params) throws IOException {
      distribDelete(cmd, nodes, params, false, null, null);
    }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c65fe4f/solr/core/src/test-files/log4j2.xml
----------------------------------------------------------------------
diff --cc solr/core/src/test-files/log4j2.xml
index 0240fcb,7d0ebf7..612cfe8
--- a/solr/core/src/test-files/log4j2.xml
+++ b/solr/core/src/test-files/log4j2.xml
@@@ -27,14 -27,13 +27,14 @@@
      </Console>
    </Appenders>
    <Loggers>
-     <AsyncLogger name="org.apache.zookeeper" level="WARN"/>
-     <AsyncLogger name="org.apache.hadoop" level="WARN"/>
-     <AsyncLogger name="org.apache.directory" level="WARN"/>
-     <AsyncLogger name="org.apache.solr.hadoop" level="INFO"/>
-     <AsyncLogger name="org.eclipse.jetty" level="INFO"/>
+     <Logger name="org.apache.zookeeper" level="WARN"/>
+     <Logger name="org.apache.hadoop" level="WARN"/>
+     <Logger name="org.apache.directory" level="WARN"/>
+     <Logger name="org.apache.solr.hadoop" level="INFO"/>
++    <Logger name="org.eclipse.jetty" level="INFO"/>
  
-     <AsyncRoot level="INFO">
+     <Root level="INFO">
        <AppenderRef ref="STDERR"/>
-     </AsyncRoot>
+     </Root>
    </Loggers>
  </Configuration>


[37/43] lucene-solr:jira/http2: Fix num[u]eric typo in stream-evaluator-reference.adoc file.

Posted by da...@apache.org.
Fix num[u]eric typo in stream-evaluator-reference.adoc file.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/41e972ee
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/41e972ee
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/41e972ee

Branch: refs/heads/jira/http2
Commit: 41e972ee8ac8a47a75b8032e95125bd4d0428c47
Parents: b32dcbb
Author: Christine Poerschke <cp...@apache.org>
Authored: Wed Sep 12 22:30:01 2018 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Wed Sep 12 22:30:01 2018 +0100

----------------------------------------------------------------------
 solr/solr-ref-guide/src/stream-evaluator-reference.adoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/41e972ee/solr/solr-ref-guide/src/stream-evaluator-reference.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/stream-evaluator-reference.adoc b/solr/solr-ref-guide/src/stream-evaluator-reference.adoc
index e9dce5d..a88915f 100644
--- a/solr/solr-ref-guide/src/stream-evaluator-reference.adoc
+++ b/solr/solr-ref-guide/src/stream-evaluator-reference.adoc
@@ -594,8 +594,8 @@ OR
 === distance Syntax
 
 [source,text]
-distance(numericArray1, numuericArray2) // Computes the euclidean distance for two numeric arrays.
-distance(numericArray1, numuericArray2, type=manhattan) // Computes the manhattan distance for two numeric arrays.
+distance(numericArray1, numericArray2) // Computes the euclidean distance for two numeric arrays.
+distance(numericArray1, numericArray2, type=manhattan) // Computes the manhattan distance for two numeric arrays.
 distance(matrix) // Computes the euclidean distance matrix for a matrix.
 distance(matrix, type=canberra) // Computes the canberra distance matrix for a matrix.
 


[05/43] lucene-solr:jira/http2: [LUCENE-8343] weight long overflow fix + test

Posted by da...@apache.org.
[LUCENE-8343] weight long overflow fix + test


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e0232f10
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e0232f10
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e0232f10

Branch: refs/heads/jira/http2
Commit: e0232f104509f28126d9ce060663f87508366338
Parents: 2b636e8
Author: Alessandro Benedetti <a....@sease.io>
Authored: Thu Jun 7 18:57:30 2018 +0100
Committer: Alessandro Benedetti <a....@sease.io>
Committed: Thu Jun 7 18:57:30 2018 +0100

----------------------------------------------------------------------
 .../analyzing/BlendedInfixSuggester.java        |  6 ++--
 .../analyzing/BlendedInfixSuggesterTest.java    | 32 ++++++++++++++------
 2 files changed, 27 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e0232f10/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
index dc65f7a..63f432f 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
@@ -227,8 +227,10 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
       if (weight == 0) {
         weight = 1;
       }
-      long scaledCoefficient = (long) (coefficient * 10);
-      long score = weight * scaledCoefficient;
+      if (weight < 1 / LINEAR_COEF && weight > -1 / LINEAR_COEF) {
+        weight *= 1 / LINEAR_COEF;
+      }
+      long score = (long) (weight * coefficient);
 
       LookupResult result;
       if (doHighlight) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e0232f10/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
index 1e5a5da..296e404 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
@@ -81,6 +81,20 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
     assertSuggestionsRanking(payload, suggester);
   }
 
+  /**
+   * Test to validate the suggestions ranking according to the position coefficient,
+   * even if the weight associated to the suggestion is very big, no overflow should happen.
+   */
+  public void testBlendedSort_fieldWeightLongMax_shouldRankSuggestionsByPositionMatchWithNoOverflow() throws IOException {
+    BytesRef payload = new BytesRef("star");
+    Input keys[] = new Input[]{
+        new Input("star wars: episode v - the empire strikes back", Long.MAX_VALUE, payload)
+    };
+    BlendedInfixSuggester suggester = getBlendedInfixSuggester(keys);
+
+    assertSuggestionsRanking(payload, suggester);
+  }
+
   private void assertSuggestionsRanking(BytesRef payload, BlendedInfixSuggester suggester) throws IOException {
     // we query for star wars and check that the weight
     // is smaller when we search for tokens that are far from the beginning
@@ -131,9 +145,9 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
     BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a);
     suggester.build(new InputArrayIterator(keys));
 
-    assertEquals(10 * w, getInResults(suggester, "top", pl, 1));
-    assertEquals(w * (long) (10 * (1 - 0.10 * 2)), getInResults(suggester, "the", pl, 1));
-    assertEquals(w * (long) (10 * (1 - 0.10 * 3)), getInResults(suggester, "lake", pl, 1));
+    assertEquals(w, getInResults(suggester, "top", pl, 1));
+    assertEquals((int) (w * (1 - 0.10 * 2)), getInResults(suggester, "the", pl, 1));
+    assertEquals((int) (w * (1 - 0.10 * 3)), getInResults(suggester, "lake", pl, 1));
 
     suggester.close();
 
@@ -143,9 +157,9 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
                                           BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1, false);
     suggester.build(new InputArrayIterator(keys));
 
-    assertEquals(10 * w, getInResults(suggester, "top", pl, 1));
-    assertEquals(w * (long) (10 * 1 / (1 + 2)), getInResults(suggester, "the", pl, 1));
-    assertEquals(w * (long) (10 * 1 / (1 + 3)), getInResults(suggester, "lake", pl, 1));
+    assertEquals(w, getInResults(suggester, "top", pl, 1));
+    assertEquals((int) (w * 1 / (1 + 2)), getInResults(suggester, "the", pl, 1));
+    assertEquals((int) (w * 1 / (1 + 3)), getInResults(suggester, "lake", pl, 1));
     suggester.close();
 
     // BlenderType.EXPONENTIAL_RECIPROCAL is using 1/(pow(1+p, exponent)) * w where w is weight and p the position of the word
@@ -155,9 +169,9 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
 
     suggester.build(new InputArrayIterator(keys));
 
-    assertEquals(10 * w, getInResults(suggester, "top", pl, 1));
-    assertEquals(w * (long) (10 * 1 / (Math.pow(1 + 2, 4.0))), getInResults(suggester, "the", pl, 1));
-    assertEquals(w * (long) (10 * 1 / (Math.pow(1 + 3, 4.0))), getInResults(suggester, "lake", pl, 1));
+    assertEquals(w, getInResults(suggester, "top", pl, 1));
+    assertEquals((int) (w * 1 / (Math.pow(1 + 2, 4.0))), getInResults(suggester, "the", pl, 1));
+    assertEquals((int) (w * 1 / (Math.pow(1 + 3, 4.0))), getInResults(suggester, "lake", pl, 1));
 
     suggester.close();
   }
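
A short standalone sketch (not from the patch) of the overflow this commit
guards against: with a weight of Long.MAX_VALUE, scaling the coefficient to a
long and multiplying wraps around to a negative score, while multiplying in
double arithmetic first and casting once keeps the score positive.

    public class OverflowSketch {
      public static void main(String[] args) {
        long weight = Long.MAX_VALUE;
        double coefficient = 0.8; // positional coefficient in (0, 1]

        // Pure long multiplication wraps around (here to -8).
        long overflowed = weight * (long) (coefficient * 10);
        // Multiplying as doubles and casting once stays positive.
        long safe = (long) (weight * coefficient);

        System.out.println(overflowed + " vs " + safe);
      }
    }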


[20/43] lucene-solr:jira/http2: SOLR-12738: removed unused method

Posted by da...@apache.org.
SOLR-12738: removed unused method


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8fbeedf2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8fbeedf2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8fbeedf2

Branch: refs/heads/jira/http2
Commit: 8fbeedf27f8280f2c767d20d4558053adcf87cb7
Parents: 2ffcb87
Author: Noble Paul <no...@apache.org>
Authored: Mon Sep 10 23:54:16 2018 +1000
Committer: Noble Paul <no...@apache.org>
Committed: Mon Sep 10 23:54:16 2018 +1000

----------------------------------------------------------------------
 .../solr/cloud/autoscaling/AutoScalingHandler.java     | 13 ++++++-------
 .../solr/client/solrj/cloud/autoscaling/Policy.java    |  4 ++--
 2 files changed, 8 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8fbeedf2/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
index 17734dc..899c5cd 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScalingHandler.java
@@ -315,18 +315,17 @@ public class AutoScalingHandler extends RequestHandlerBase implements Permission
         return currentConfig;
       }
     }
-    List<String> params = new ArrayList<>(currentConfig.getPolicy().getParams());
-    Map<String, List<Clause>> mergedPolicies = new HashMap<>(currentConfig.getPolicy().getPolicies());
-    Map<String, List<Clause>> newPolicies = null;
+    Map<String, List<Clause>> currentClauses = new HashMap<>(currentConfig.getPolicy().getPolicies());
+    Map<String, List<Clause>> newClauses = null;
     try {
-      newPolicies = Policy.policiesFromMap((Map<String, List<Map<String, Object>>>) op.getCommandData(),
-          params);
+      newClauses = Policy.clausesFromMap((Map<String, List<Map<String, Object>>>) op.getCommandData(),
+          new ArrayList<>() );
     } catch (Exception e) {
       op.addError(e.getMessage());
       return currentConfig;
     }
-    mergedPolicies.putAll(newPolicies);
-    Policy p = currentConfig.getPolicy().withPolicies(mergedPolicies).withParams(params);
+    currentClauses.putAll(newClauses);
+    Policy p = currentConfig.getPolicy().withPolicies(currentClauses);
     currentConfig = currentConfig.withPolicy(p);
     return currentConfig;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8fbeedf2/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
index 210e324..2552f0a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
@@ -138,7 +138,7 @@ public class Policy implements MapWriter {
     }
 
     this.policies = Collections.unmodifiableMap(
-        policiesFromMap((Map<String, List<Map<String, Object>>>) jsonMap.getOrDefault(POLICIES, emptyMap()), newParams));
+        clausesFromMap((Map<String, List<Map<String, Object>>>) jsonMap.getOrDefault(POLICIES, emptyMap()), newParams));
     List<Pair<String, Type>> params = newParams.stream()
         .map(s -> new Pair<>(s, VariableBase.getTagType(s)))
         .collect(toList());
@@ -241,7 +241,7 @@ public class Policy implements MapWriter {
     return getClusterPreferences().equals(policy.getClusterPreferences());
   }
 
-  public static Map<String, List<Clause>> policiesFromMap(Map<String, List<Map<String, Object>>> map, List<String> newParams) {
+  public static Map<String, List<Clause>> clausesFromMap(Map<String, List<Map<String, Object>>> map, List<String> newParams) {
     Map<String, List<Clause>> newPolicies = new HashMap<>();
     map.forEach((s, l1) ->
         newPolicies.put(s, l1.stream()
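
For illustration, a hypothetical caller of the renamed method (the signature
is taken from the diff above; the class name and empty input are made up):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.solr.client.solrj.cloud.autoscaling.Clause;
    import org.apache.solr.client.solrj.cloud.autoscaling.Policy;

    public class ClausesFromMapSketch {
      // Parses named policies, given as lists of clause definitions, into
      // Clause objects; an empty input simply yields an empty result.
      public static Map<String, List<Clause>> parseNone() {
        Map<String, List<Map<String, Object>>> raw = new HashMap<>();
        return Policy.clausesFromMap(raw, new ArrayList<>());
      }
    }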


[30/43] lucene-solr:jira/http2: SOLR-12763: upgrade notes + some MergePolicy param fixes

Posted by da...@apache.org.
SOLR-12763: upgrade notes + some MergePolicy param fixes


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/89bc0824
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/89bc0824
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/89bc0824

Branch: refs/heads/jira/http2
Commit: 89bc082478ded31433352b426ad8806ab9238be0
Parents: d35d206
Author: Cassandra Targett <ct...@apache.org>
Authored: Tue Sep 11 10:50:21 2018 -0500
Committer: Cassandra Targett <ct...@apache.org>
Committed: Tue Sep 11 13:26:42 2018 -0500

----------------------------------------------------------------------
 .../src/indexconfig-in-solrconfig.adoc          | 14 ++--
 .../src/other-schema-elements.adoc              |  2 +-
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc | 73 ++++++++++++++++++--
 .../src/uploading-data-with-index-handlers.adoc |  8 +--
 4 files changed, 79 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89bc0824/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc b/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
index bae9e84..2318865 100644
--- a/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
+++ b/solr/solr-ref-guide/src/indexconfig-in-solrconfig.adoc
@@ -75,17 +75,19 @@ Other policies available are the `LogByteSizeMergePolicy`, `LogDocMergePolicy`,
 ----
 
 [[merge-factors]]
-=== Controlling Segment Sizes: Merge Factors
+=== Controlling Segment Sizes
 
-The most common adjustment users make to the configuration of TieredMergePolicy (or LogByteSizeMergePolicy) are the "merge factors" to change how many segments should be merged at one time.
+The most common adjustments users make to the configuration of `TieredMergePolicy` (or `LogByteSizeMergePolicy`) are the "merge factors" to change how many segments should be merged at one time and, in the `TieredMergePolicy` case, the maximum size of a merged segment.
 
-For TieredMergePolicy, this is controlled by setting the `<int name="maxMergeAtOnce">` and `<int name="segmentsPerTier">` options, while LogByteSizeMergePolicy has a single `<int name="mergeFactor">` option (all of which default to `10`).
+For `TieredMergePolicy`, this is controlled by setting the `maxMergeAtOnce` (default `10`), `segmentsPerTier` (default `10`) and `maxMergedSegmentMB` (default `5000`) options.
 
-To understand why these options are important, consider what happens when an update is made to an index using LogByteSizeMergePolicy: Documents are always added to the most recently opened segment. When a segment fills up, a new segment is created and subsequent updates are placed there.
+`LogByteSizeMergePolicy` has a single `mergeFactor` option (default `10`).
 
-If creating a new segment would cause the number of lowest-level segments to exceed the `mergeFactor` value, then all those segments are merged together to form a single large segment. Thus, if the merge factor is 10, each merge results in the creation of a single segment that is roughly ten times larger than each of its ten constituents. When there are 10 of these larger segments, then they in turn are merged into an even larger single segment. This process can continue indefinitely.
+To understand why these options are important, consider what happens when an update is made to an index using `LogByteSizeMergePolicy`: Documents are always added to the most recently opened segment. When a segment fills up, a new segment is created and subsequent updates are placed there.
 
-When using TieredMergePolicy, the process is the same, but instead of a single `mergeFactor` value, the `segmentsPerTier` setting is used as the threshold to decide if a merge should happen, and the `maxMergeAtOnce` setting determines how many segments should be included in the merge.
+If creating a new segment would cause the number of lowest-level segments to exceed the `mergeFactor` value, then all those segments are merged together to form a single large segment. Thus, if the merge factor is `10`, each merge results in the creation of a single segment that is roughly ten times larger than each of its ten constituents. When there are 10 of these larger segments, then they in turn are merged into an even larger single segment. This process can continue indefinitely.
+
+When using `TieredMergePolicy`, the process is the same, but instead of a single `mergeFactor` value, the `segmentsPerTier` setting is used as the threshold to decide if a merge should happen, and the `maxMergeAtOnce` setting determines how many segments should be included in the merge.
 
 Choosing the best merge factors is generally a trade-off of indexing speed vs. searching speed. Having fewer segments in the index generally accelerates searches, because there are fewer places to look. It can also result in fewer physical files on disk. But to keep the number of segments low, merges will occur more often, which can add load to the system and slow down updates to the index.
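
The same three knobs can also be set programmatically; a minimal sketch using
Lucene's TieredMergePolicy setters with the defaults documented above:

    import org.apache.lucene.index.TieredMergePolicy;

    public class MergeFactorsSketch {
      public static TieredMergePolicy documentedDefaults() {
        TieredMergePolicy tmp = new TieredMergePolicy();
        tmp.setMaxMergeAtOnce(10);         // segments merged at one time
        tmp.setSegmentsPerTier(10.0);      // threshold for triggering a merge
        tmp.setMaxMergedSegmentMB(5000.0); // ~5 GB cap on a merged segment
        return tmp;
      }
    }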
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89bc0824/solr/solr-ref-guide/src/other-schema-elements.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/other-schema-elements.adoc b/solr/solr-ref-guide/src/other-schema-elements.adoc
index 54224ec..2bcf0fd 100644
--- a/solr/solr-ref-guide/src/other-schema-elements.adoc
+++ b/solr/solr-ref-guide/src/other-schema-elements.adoc
@@ -29,7 +29,7 @@ You can define the unique key field by naming it:
 <uniqueKey>id</uniqueKey>
 ----
 
-Schema defaults and `copyFields` cannot be used to populate the `uniqueKey` field. The `fieldType` of `uniqueKey` must not be analyzed. You can use `UUIDUpdateProcessorFactory` to have `uniqueKey` values generated automatically.
+Schema defaults and `copyFields` cannot be used to populate the `uniqueKey` field. The `fieldType` of `uniqueKey` must not be analyzed and must not be any of the `*PointField` types. You can use `UUIDUpdateProcessorFactory` to have `uniqueKey` values generated automatically.
 
 Further, the operation will fail if the `uniqueKey` field is used, but is multivalued (or inherits the multivalue-ness from the `fieldtype`). However, `uniqueKey` will continue to work, as long as the field is properly used.
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89bc0824/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
index 63c7e4c..dca9ea6 100644
--- a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
+++ b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
@@ -1,5 +1,6 @@
 = Solr Upgrade Notes
 :page-children: major-changes-in-solr-8, major-changes-in-solr-7, major-changes-from-solr-5-to-solr-6
+:page-toclevels: 3
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -28,9 +29,33 @@ Detailed steps for upgrading a Solr cluster are in the section <<upgrading-a-sol
 == Upgrading to 7.x Releases
 
 === Solr 7.5
-When upgrading to Solr 7.4, users should be aware of the following major changes from v7.3:
 
-* When using the default TieredMergePolicy (TMP), optimize and expungeDeletes now respect the maxMergedSegmentMB configuration parameter, which defaults to 5,000 (5 gigaBytes). If it is absolutely necessary to control the number of segments present after optimize, specify maxSegments=# where # is a positive integer. maxSegments > 1 are honored on a "best effort" basis. TMP will also reclaim resources from segments that exceed maxMergedSegmentMB more aggressively.
+See the https://wiki.apache.org/solr/ReleaseNote75[7.5 Release Notes] for an overview of the main new features in Solr 7.5.
+
+When upgrading to Solr 7.5, users should be aware of the following major changes from v7.4:
+
+*Schema Changes*
+
+* Since Solr 7.0, Solr's schema field-guessing has created `_str` fields for all `_txt` fields, and returned those by default with queries. As of 7.5, `_str` fields will no longer be returned by default. They will still be available and can be requested with the `fl` parameter on queries. See also the section on <<schemaless-mode.adoc#enable-field-class-guessing,field guessing>> for more information about how schema field guessing works.
+* The Standard Filter, which has been non-operational since at least Solr v4, has been removed.
+
+*Index Merge Policy*
+
+* When using the <<indexconfig-in-solrconfig.adoc#mergepolicyfactory,`TieredMergePolicy`>>, the default merge policy for Solr, `optimize` and `expungeDeletes` now respect the `maxMergedSegmentMB` configuration parameter, which defaults to `5000` (5GB).
++
+If it is absolutely necessary to control the number of segments present after optimize, specify `maxSegments` as a positive integer. Setting `maxSegments` higher than `1` is honored on a "best effort" basis.
++
+The `TieredMergePolicy` will also reclaim resources from segments that exceed `maxMergedSegmentMB` more aggressively than earlier.
+
+*UIMA Removed*
+
+* The UIMA contrib has been removed from Solr and is no longer available.
+
+*Logging*
+
+* Solr's logging configuration file is now located in `server/resources/log4j2.xml` by default.
+
+* A bug for Windows users has been corrected. When using Solr's examples (`bin/solr start -e`) log files will now be put in the correct location (`example/` instead of `server`). See also <<installing-solr.adoc#solr-examples,Solr Examples>> and <<solr-control-script-reference.adoc#solr-control-script-reference,Solr Control Script Reference>> for more information.
 
 
 === Solr 7.4
@@ -39,10 +64,14 @@ See the https://wiki.apache.org/solr/ReleaseNote74[7.4 Release Notes] for an ove
 
 When upgrading to Solr 7.4, users should be aware of the following major changes from v7.3:
 
+*Logging*
+
 * Solr now uses Log4j v2.11. The Log4j configuration is now in `log4j2.xml` rather than `log4j.properties` files. This is a server side change only and clients using SolrJ won't need any changes. Clients can still use any logging implementation which is compatible with SLF4J. We now let Log4j handle rotation of solr logs at startup, and `bin/solr` start scripts will no longer attempt this nor move existing console or garbage collection logs into `logs/archived` either. See <<configuring-logging.adoc#configuring-logging,Configuring Logging>> for more details about Solr logging.
 
 * Configuring `slowQueryThresholdMillis` now logs slow requests to a separate file named `solr_slow_requests.log`. Previously they would get logged in the `solr.log` file.
 
+*Legacy Scaling (non-SolrCloud)*
+
 * In the <<index-replication.adoc#index-replication,master-slave model>> of scaling Solr, a slave no longer commits an empty index when a completely new index is detected on master during replication. To return to the previous behavior, pass `false` to `skipCommitOnMasterVersionZero` in the slave section of replication handler configuration, or pass it to the `fetchindex` command.
 
 If you are upgrading from a version earlier than Solr 7.3, please see previous version notes below.
@@ -53,26 +82,40 @@ See the https://wiki.apache.org/solr/ReleaseNote73[7.3 Release Notes] for an ove
 
 When upgrading to Solr 7.3, users should be aware of the following major changes from v7.2:
 
+*ConfigSets*
+
 * Collections created without specifying a configset name have used a copy of the `_default` configset since Solr 7.0. Before 7.3, the copied configset was named the same as the collection name, but from 7.3 onwards it will be named with a new ".AUTOCREATED" suffix. This is to prevent overwriting custom configset names.
 
-* The `rq` parameter used with Learning to Rank rerank query parsing no longer considers the `defType` parameter. See <<learning-to-rank.adoc#running-a-rerank-query,Running a Rerank Query>> for more information about this parameter.
+*Learning to Rank*
+
+* The `rq` parameter used with Learning to Rank `rerank` query parsing no longer considers the `defType` parameter. See <<learning-to-rank.adoc#running-a-rerank-query,Running a Rerank Query>> for more information about this parameter.
+
+*Autoscaling & AutoAddReplicas*
+
+* The behaviour of the autoscaling system will now pause execution of all triggers between the start of actions and the end of a cool down period. The triggers will resume after the cool down period expires. Previously, the cool down period was a fixed period that started after actions for a trigger event completed; during this time all triggers continued to run, but any events were rejected and tried later.
+
+* The throttling mechanism used to limit the rate of autoscaling events processed has been removed. This deprecates the `actionThrottlePeriodSeconds` setting in the <<solrcloud-autoscaling-api.adoc#change-autoscaling-properties,`set-properties` Autoscaling API>> which is now non-operational. Use the `triggerCooldownPeriodSeconds` parameter instead to pause event processing.
 
 * The default value of `autoReplicaFailoverWaitAfterExpiration`, used with the AutoAddReplicas feature, has increased to 120 seconds from the previous default of 30 seconds. This affects how soon Solr adds new replicas to replace the replicas on nodes which have either crashed or shutdown.
 
+*Logging*
+
 * The default Solr log file size and number of backups have been raised to 32MB and 10 respectively. See the section <<configuring-logging.adoc#configuring-logging,Configuring Logging>> for more information about how to configure logging.
 
+*SolrCloud*
+
 * The old Leader-In-Recovery implementation (implemented in Solr 4.9) is now deprecated and replaced. Solr will support rolling upgrades from old 7.x versions of Solr to future 7.x releases until the last release of the 7.x major version.
 +
 This means to upgrade to Solr 8 in the future, you will need to be on Solr 7.3 or higher.
 
 * Replicas which are not up-to-date are no longer allowed to become leader. Use the <<collections-api.adoc#forceleader,FORCELEADER command>> of the Collections API to allow these replicas to become leader.
 
-* The behaviour of the autoscaling system will now pause all triggers from execution between the start of actions and the end of a cool down period. The triggers will resume after the cool down period expires. Previously, the cool down period was a fixed period started after actions for a trigger event completed and during this time all triggers continued to run but any events were rejected and tried later.
-
-* The throttling mechanism used to limit the rate of autoscaling events processed has been removed. This deprecates the `actionThrottlePeriodSeconds` setting in the <<solrcloud-autoscaling-api.adoc#change-autoscaling-properties,`set-properties` Autoscaling API>> which is now non-operational. Use the `triggerCooldownPeriodSeconds` parameter instead to pause event processing.
+*Spatial*
 
 * If you are using the spatial JTS library with Solr, you must upgrade to 1.15.0. This new version of JTS is now dual-licensed to include a BSD style license. See the section on <<spatial-search.adoc#spatial-search,Spatial Search>> for more information.
 
+*Highlighting*
+
 * The top-level `<highlighting>` element in `solrconfig.xml` is now officially deprecated in favour of the equivalent `<searchComponent>` syntax. This element has been out of use in default Solr installations for several releases already.
 
 If you are upgrading from a version earlier than Solr 7.2, please see previous version notes below.
@@ -83,6 +126,8 @@ See the https://wiki.apache.org/solr/ReleaseNote72[7.2 Release Notes] for an ove
 
 When upgrading to Solr 7.2, users should be aware of the following major changes from v7.1:
 
+*Local Parameters*
+
 * Starting a query string with <<local-parameters-in-queries.adoc#local-parameters-in-queries,local parameters>> `{!myparser ...}` is used to switch from one query parser to another, and is intended for use by Solr system developers, not end users doing searches. To reduce negative side-effects of unintended hack-ability, Solr now limits the cases when local parameters will be parsed to only contexts in which the default parser is "<<other-parsers.adoc#lucene-query-parser,lucene>>" or "<<other-parsers.adoc#function-query-parser,func>>".
 +
 So, if `defType=edismax` then `q={!myparser ...}` won't work. In that example, put the desired query parser into the `defType` parameter.
@@ -91,6 +136,8 @@ Another example is if `deftype=edismax` then `hl.q={!myparser ...}` won't work f
 +
 If you must have full backwards compatibility, use `luceneMatchVersion=7.1.0` or an earlier version.
 
+*eDisMax Parser*
+
 * The eDisMax parser by default no longer allows subqueries that specify a Solr parser using either local parameters, or the older `\_query_` magic field trick.
 +
 For example, `{!prefix f=myfield v=enterp}` or `\_query_:"{!prefix f=myfield v=enterp}"` are not supported by default any longer. If you want to allow power-users to do this, set `uf=* _query_` or some other value that includes `\_query_`.
@@ -105,6 +152,8 @@ See the https://wiki.apache.org/solr/ReleaseNote71[7.1 Release Notes] for an ove
 
 When upgrading to Solr 7.1, users should be aware of the following major changes from v7.0:
 
+*AutoAddReplicas*
+
 * The feature to automatically add replicas if a replica goes down, previously available only when storing indexes in HDFS, has been ported to the autoscaling framework. Due to this, `autoAddReplicas` is now available to all users even if their indexes are on local disks.
 +
 Existing users of this feature should not have to change anything. However, they should note these changes:
@@ -114,18 +163,28 @@ Existing users of this feature should not have to change anything. However, they
 +
 More information about the changes to this feature can be found in the section <<solrcloud-autoscaling-auto-add-replicas.adoc#solrcloud-autoscaling-auto-add-replicas,SolrCloud Automatically Adding Replicas>>.
 
-* Shard and cluster metric reporter configuration now require a class attribute.
+*Metrics Reporters*
+
+* Shard and cluster metric reporter configuration now require a `class` attribute.
 ** If a reporter configures the `group="shard"` attribute then please also configure the `class="org.apache.solr.metrics.reporters.solr.SolrShardReporter"` attribute.
 ** If a reporter configures the `group="cluster"` attribute then please also configure the  `class="org.apache.solr.metrics.reporters.solr.SolrClusterReporter"` attribute.
 +
 See the section <<metrics-reporting.adoc#shard-and-cluster-reporters,Shard and Cluster Reporters>> for more information.
 
+*Streaming Expressions*
+
 * All Stream Evaluators in `solrj.io.eval` have been refactored to have a simpler and more robust structure. This simplifies and condenses the code required to implement a new Evaluator and makes it much easier for evaluators to handle differing data types (primitives, objects, arrays, lists, and so forth).
 
+*ReplicationHandler*
+
 * In the ReplicationHandler, the `master.commitReserveDuration` sub-element is deprecated. Instead, please configure a direct `commitReserveDuration` element for use in all modes (master, slave, cloud).
 
+*RunExecutableListener*
+
 * The `RunExecutableListener` was removed for security reasons. If you want to listen to events caused by updates, commits, or optimize, write your own listener as a native Java class as part of a Solr plugin.
 
+*XML Query Parser*
+
 * In the XML query parser (`defType=xmlparser` or `{!xmlparser ... }`) the resolving of external entities is now disallowed by default.
 
 If you are upgrading from a version earlier than Solr 7.0, please see <<major-changes-in-solr-7.adoc#major-changes-in-solr-7,Major Changes in Solr 7>> before starting your upgrade.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89bc0824/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc b/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
index 381c0f6..0f523d8 100644
--- a/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
+++ b/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
@@ -96,13 +96,13 @@ The `<commit>` and `<optimize>` elements accept these optional attributes:
 `waitSearcher`::
 Default is `true`. Blocks until a new searcher is opened and registered as the main query searcher, making the changes visible.
 
-`expungeDeletes`:: (commit only) Default is `false`. Merges segments that have more than 10% deleted docs, expunging the deleted documents in the process. Resulting segments will respect maxMergedSegmentMB.
-
+`expungeDeletes`:: (commit only) Default is `false`. Merges segments that have more than 10% deleted docs, expunging the deleted documents in the process. Resulting segments will respect `maxMergedSegmentMB`.
++
 WARNING: expungeDeletes is "less expensive" than optimize, but the same warnings apply.
 
-`maxSegments`:: (optimize only) Default is unlimited, resulting segments respect the maxMergedSegmentMB setting. Makes a "best effort" attempt to merge the segments down to no more than this number of segments but does not guarantee that the goal will be achieved. Unless there is tangible evidence that optimizing to a small number of segments is beneficial, this parameter should be omitted and the default behavior accepted.
+`maxSegments`:: (optimize only) Default is unlimited; resulting segments respect the `maxMergedSegmentMB` setting. Makes a best effort attempt to merge the segments down to no more than this number of segments but does not guarantee that the goal will be achieved. Unless there is tangible evidence that optimizing to a small number of segments is beneficial, this parameter should be omitted and the default behavior accepted.
 
-Here are examples of <commit> and <optimize> using optional attributes:
+Here are examples of `<commit>` and `<optimize>` using optional attributes:
 
 [source,xml]
 ----


[39/43] lucene-solr:jira/http2: SOLR-12766: Improve backoff for internal retries

Posted by da...@apache.org.
SOLR-12766: Improve backoff for internal retries

When retrying internal update requests, backoff only once for the full batch of retries
instead of for every request.
Make backoff linear with the number of retries.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4a5b914e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4a5b914e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4a5b914e

Branch: refs/heads/jira/http2
Commit: 4a5b914eaa8683009191748bf6c0b1be14d59661
Parents: 6e8c05f
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Wed Sep 12 16:29:17 2018 -0700
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Wed Sep 12 21:11:34 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 ++
 .../apache/solr/update/SolrCmdDistributor.java  | 21 +++++++++++++-------
 2 files changed, 16 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a5b914e/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 113ca13..64b8e58 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -385,6 +385,8 @@ Optimizations
 
 * SOLR-12723: Reduce object creation in HashBasedRouter. (ab)
 
+* SOLR-12766: When retrying internal requests, back off only once for the full batch of retries (Tomás Fernández Löbbe)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a5b914e/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
index d5aafec..3a65f17 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
@@ -79,7 +79,8 @@ public class SolrCmdDistributor implements Closeable {
     this.completionService = new ExecutorCompletionService<>(updateShardHandler.getUpdateExecutor());
   }
   
-  public SolrCmdDistributor(StreamingSolrClients clients, int retryPause) {
+  /* For tests only */
+  SolrCmdDistributor(StreamingSolrClients clients, int retryPause) {
     this.clients = clients;
     this.retryPause = retryPause;
     completionService = new ExecutorCompletionService<>(clients.getUpdateExecutor());
@@ -156,12 +157,6 @@ public class SolrCmdDistributor implements Closeable {
                 + err.req.cmd.toString() + " params:"
                 + err.req.uReq.getParams() + " rsp:" + err.statusCode, err.e);
           }
-          try {
-            Thread.sleep(retryPause); //TODO: Do we want this wait for every error?
-          } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            log.warn(null, e);
-          }
           resubmitList.add(err);
         } else {
           allErrors.add(err);
@@ -172,6 +167,18 @@ public class SolrCmdDistributor implements Closeable {
       }
     }
     
+    if (resubmitList.size() > 0) {
+      // Only backoff once for the full batch
+      try {
+        int backoffTime = retryPause * resubmitList.get(0).req.retries;
+        log.debug("Sleeping {}ms before re-submitting {} requests", backoffTime, resubmitList.size());
+        Thread.sleep(backoffTime);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        log.warn(null, e);
+      }
+    }
+    
     clients.clearErrors();
     this.errors.clear();
     for (Error err : resubmitList) {

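The effect of the new block is a single, linear backoff per resubmit batch: with a hypothetical retryPause of 500 ms (the actual default is not shown in this diff), a batch whose first request is on retry 1 sleeps 500 ms, on retry 2 sleeps 1000 ms, and so on. A standalone sketch of the same policy (class and field names are illustrative, not the real SolrCmdDistributor API):

[source,java]
----
import java.util.List;

class LinearBatchBackoff {

  private final int retryPauseMs; // base pause, scaled by the retry count

  LinearBatchBackoff(int retryPauseMs) {
    this.retryPauseMs = retryPauseMs;
  }

  /** Sleeps once for the whole batch instead of once per failed request. */
  void backoff(List<FailedRequest> resubmitList) {
    if (resubmitList.isEmpty()) {
      return; // nothing to resubmit, so no pause at all
    }
    // Linear backoff: base pause multiplied by the batch's retry count.
    long backoffTimeMs = (long) retryPauseMs * resubmitList.get(0).retries;
    try {
      Thread.sleep(backoffTimeMs);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve the interrupt, as the patch does
    }
  }

  /** Stand-in for the request wrapper; only the retry count matters here. */
  static class FailedRequest {
    int retries;
  }
}
----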

[29/43] lucene-solr:jira/http2: SOLR-11836: FacetStream works with bucketSizeLimit of -1, which will fetch all the buckets

Posted by da...@apache.org.
SOLR-11836: FacetStream works with bucketSizeLimit of -1, which will fetch all the buckets


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d35d2063
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d35d2063
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d35d2063

Branch: refs/heads/jira/http2
Commit: d35d2063a817b4b4a6975115860624686afe8964
Parents: 398074d
Author: Varun Thacker <va...@apache.org>
Authored: Tue Sep 11 10:58:04 2018 -0700
Committer: Varun Thacker <va...@apache.org>
Committed: Tue Sep 11 10:58:04 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                            | 3 +++
 solr/solr-ref-guide/src/stream-source-reference.adoc        | 2 +-
 .../org/apache/solr/client/solrj/io/stream/FacetStream.java | 7 +++++--
 .../apache/solr/client/solrj/io/stream/StreamingTest.java   | 9 +++++++++
 4 files changed, 18 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d35d2063/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f9e5b56..c3ab65b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -339,6 +339,9 @@ Bug Fixes
 
 * SOLR-12733: SolrMetricReporterTest failure (Erick Erickson, David Smiley)
 
+* SOLR-11836: FacetStream works with bucketSizeLimit of -1, which will fetch all the buckets.
+  (Alfonso Muñoz-Pomer Fuentes via Varun Thacker)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d35d2063/solr/solr-ref-guide/src/stream-source-reference.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/stream-source-reference.adoc b/solr/solr-ref-guide/src/stream-source-reference.adoc
index 042463c..c31639a 100644
--- a/solr/solr-ref-guide/src/stream-source-reference.adoc
+++ b/solr/solr-ref-guide/src/stream-source-reference.adoc
@@ -131,7 +131,7 @@ The `facet` function provides aggregations that are rolled up over buckets. Unde
 * `q`: (Mandatory) The query to build the aggregations from.
 * `buckets`: (Mandatory) Comma separated list of fields to rollup over. The comma separated list represents the dimensions in a multi-dimensional rollup.
 * `bucketSorts`: Comma separated list of sorts to apply to each dimension in the buckets parameters. Sorts can be on the computed metrics or on the bucket values.
-* `bucketSizeLimit`: The number of buckets to include. This value is applied to each dimension.
+* `bucketSizeLimit`: The number of buckets to include. This value is applied to each dimension. Setting it to `-1` will fetch all the buckets.
 * `metrics`: List of metrics to compute for the buckets. Currently supported metrics are `sum(col)`, `avg(col)`, `min(col)`, `max(col)`, `count(*)`.
 
 === facet Syntax

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d35d2063/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
index 4010ff42..4564ba0 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
@@ -161,8 +161,8 @@ public class FacetStream extends TupleStream implements Expressible  {
     int limitInt = 0;
     try{
       limitInt = Integer.parseInt(limitStr);
-      if(limitInt <= 0){
-        throw new IOException(String.format(Locale.ROOT,"invalid expression %s - limit '%s' must be greater than 0.",expression, limitStr));
+      if(limitInt <= 0 && limitInt != -1){
+        throw new IOException(String.format(Locale.ROOT,"invalid expression %s - limit '%s' must be greater than 0 or -1.",expression, limitStr));
       }
     }
     catch(NumberFormatException e){
@@ -223,6 +223,9 @@ public class FacetStream extends TupleStream implements Expressible  {
     this.buckets = buckets;
     this.metrics = metrics;
     this.bucketSizeLimit   = bucketSizeLimit;
+    if (this.bucketSizeLimit == -1) {
+      this.bucketSizeLimit = Integer.MAX_VALUE;
+    }
     this.collection = collection;
     this.bucketSorts = bucketSorts;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d35d2063/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
index 8f21100..ea3ec36 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
@@ -1076,6 +1076,15 @@ public void testParallelRankStream() throws Exception {
       assertEquals(7.5, avgi.doubleValue(), 0.1);
       assertEquals(5.5, avgf.doubleValue(), 0.1);
       assertEquals(2, count.doubleValue(), 0.1);
+
+      sorts[0] = new FieldComparator("a_s", ComparatorOrder.ASCENDING);
+
+      facetStream = new FacetStream(zkHost, COLLECTIONORALIAS, sParamsA, buckets, metrics, sorts, -1);
+      facetStream.setStreamContext(streamContext);
+      tuples = getTuples(facetStream);
+
+      assertEquals(3, tuples.size());
+
     } finally {
       solrClientCache.close();
     }

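Together with the test above, a facet expression that pulls back every bucket might look like the following sketch (the collection, fields, and metric are placeholders; the parameters mirror those documented in stream-source-reference.adoc):

[source,text]
----
facet(collection1,
      q="*:*",
      buckets="a_s",
      bucketSorts="sum(a_i) desc",
      bucketSizeLimit="-1",
      sum(a_i))
----

As the FacetStream change shows, -1 is mapped to Integer.MAX_VALUE internally, so "all the buckets" is bounded only by the integer range.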

[15/43] lucene-solr:jira/http2: SOLR-11943: Update RefGuide for latlonVectors and haversineMeters functions.

Posted by da...@apache.org.
SOLR-11943: Update RefGuide for latlonVectors and haversineMeters functions.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/304836e6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/304836e6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/304836e6

Branch: refs/heads/jira/http2
Commit: 304836e6759175e9541a9218125f44cf56ca106e
Parents: f406ff9
Author: Joel Bernstein <jb...@apache.org>
Authored: Sun Sep 9 20:44:25 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Sun Sep 9 20:44:25 2018 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/machine-learning.adoc |  9 +--
 solr/solr-ref-guide/src/math-expressions.adoc |  2 +-
 solr/solr-ref-guide/src/vectorization.adoc    | 64 +++++++++++++++++++++-
 3 files changed, 69 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/304836e6/solr/solr-ref-guide/src/machine-learning.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/machine-learning.adoc b/solr/solr-ref-guide/src/machine-learning.adoc
index ca0ae74..abbca4b 100644
--- a/solr/solr-ref-guide/src/machine-learning.adoc
+++ b/solr/solr-ref-guide/src/machine-learning.adoc
@@ -179,10 +179,11 @@ numeric arrays or a *distance matrix* for the columns of a matrix.
 There are four distance measure functions that return a function
 that performs the actual distance calculation:
 
-* euclidean() (default)
-* manhattan()
-* canberra()
-* earthMovers()
+* euclidean (default)
+* manhattan
+* canberra
+* earthMovers
+* haversineMeters (Geospatial distance measure)
 
 The distance measure functions can be used with all machine learning functions
 that support different distance measures.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/304836e6/solr/solr-ref-guide/src/math-expressions.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/math-expressions.adoc b/solr/solr-ref-guide/src/math-expressions.adoc
index 27600b6..de150cf 100644
--- a/solr/solr-ref-guide/src/math-expressions.adoc
+++ b/solr/solr-ref-guide/src/math-expressions.adoc
@@ -38,7 +38,7 @@ record in your Solr Cloud cluster computable.
 
 *<<matrix-math.adoc#matrix-math,Matrix Math>>*: Matrix creation, manipulation, and matrix math.
 
-*<<vectorization.adoc#vectorization,Streams and Vectorization>>*: Retrieving streams and vectorizing numeric fields.
+*<<vectorization.adoc#vectorization,Streams and Vectorization>>*: Retrieving streams and vectorizing numeric and lat/long point fields.
 
 *<<term-vectors.adoc#term-vectors,Text Analysis and Term Vectors>>*: Using math expressions for text analysis and TF-IDF term vectors.
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/304836e6/solr/solr-ref-guide/src/vectorization.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/vectorization.adoc b/solr/solr-ref-guide/src/vectorization.adoc
index b01dcc8..09b6a01 100644
--- a/solr/solr-ref-guide/src/vectorization.adoc
+++ b/solr/solr-ref-guide/src/vectorization.adoc
@@ -240,4 +240,66 @@ When this expression is sent to the /stream handler it responds with:
     ]
   }
 }
-----
\ No newline at end of file
+----
+
+== Latitude / Longitude Vectors
+
+The `latlonVectors` function wraps a list of tuples and parses a lat/long location field into
+a matrix of lat/long vectors. Each row in the matrix is a vector that contains the lat/long
+pair for the corresponding tuple in the list. The column labels for the matrix are
+automatically set to the *id* field in the tuples. The lat/lon matrix can then be operated
+on by machine learning functions using the `haversineMeters` distance measure.
+
+The `latlonVectors` function takes two parameters: a list of tuples and a named parameter called
+*field*. The field parameter tells the `latlonVectors` function which field to parse the lat/lon
+vectors from.
+
+Below is an example of the `latlonVectors` function.
+
+[source,text]
+----
+let(a=random(collection1, q="*:*", fl="id, loc_p", rows="5"),
+    b=latlonVectors(a, field="loc_p"))
+----
+
+When this expression is sent to the /stream handler it responds with:
+
+[source,json]
+----
+{
+  "result-set": {
+    "docs": [
+      {
+        "b": [
+          [
+            42.87183530723629,
+            76.74102353397778
+          ],
+          [
+            42.91372904094898,
+            76.72874889228416
+          ],
+          [
+            42.911528804897564,
+            76.70537292977619
+          ],
+          [
+            42.91143870500213,
+            76.74749913047408
+          ],
+          [
+            42.904666267479705,
+            76.73933236046092
+          ]
+        ]
+      },
+      {
+        "EOF": true,
+        "RESPONSE_TIME": 21
+      }
+    ]
+  }
+}
+----
+
+

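A plausible follow-on to the example above, combining it with the haversineMeters measure added to machine-learning.adoc (a sketch only: the distance and transpose calls are assumptions based on that page's description of distance matrices over matrix columns, and are not part of this commit):

[source,text]
----
let(a=random(collection1, q="*:*", fl="id, loc_p", rows="5"),
    b=latlonVectors(a, field="loc_p"),
    c=distance(transpose(b), haversineMeters()))
----

Under those assumptions, c would be a 5x5 matrix of pairwise great-circle distances, in meters, between the sampled locations.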

[06/43] lucene-solr:jira/http2: Merge remote-tracking branch 'upstream/master' into LUCENE-8343

Posted by da...@apache.org.
Merge remote-tracking branch 'upstream/master' into LUCENE-8343


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1a83a146
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1a83a146
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1a83a146

Branch: refs/heads/jira/http2
Commit: 1a83a146684f20f431ba646fecb7db311e1e0afa
Parents: e0232f1 331ccf3
Author: Alessandro Benedetti <a....@sease.io>
Authored: Wed Jul 18 10:19:21 2018 +0100
Committer: Alessandro Benedetti <a....@sease.io>
Committed: Wed Jul 18 10:19:21 2018 +0100

----------------------------------------------------------------------
 build.xml                                       |     4 +-
 dev-tools/doap/lucene.rdf                       |    14 +
 dev-tools/doap/solr.rdf                         |    14 +
 dev-tools/idea/.idea/ant.xml                    |     2 -
 dev-tools/idea/.idea/modules.xml                |     2 -
 dev-tools/idea/.idea/workspace.xml              |    18 -
 .../idea/lucene/analysis/uima/analysis-uima.iml |    30 -
 dev-tools/idea/solr/contrib/uima/uima.iml       |    36 -
 .../maven/lucene/analysis/pom.xml.template      |     1 -
 .../maven/lucene/analysis/uima/pom.xml.template |    74 -
 dev-tools/maven/solr/contrib/pom.xml.template   |     1 -
 .../maven/solr/contrib/uima/pom.xml.template    |    83 -
 dev-tools/scripts/SOLR-2452.patch.hack.pl       |    15 -
 dev-tools/scripts/addVersion.py                 |     1 -
 dev-tools/scripts/jenkins.build.ref.guide.sh    |     2 +
 dev-tools/scripts/reproduceJenkinsFailures.py   |    12 +-
 .../test-patch/lucene-solr-yetus-personality.sh |     2 +-
 lucene/CHANGES.txt                              |   131 +-
 lucene/MIGRATE.txt                              |    24 +
 lucene/analysis/README.txt                      |     6 -
 lucene/analysis/build.xml                       |     7 +-
 lucene/analysis/common/build.xml                |     1 +
 .../lucene/analysis/bg/BulgarianAnalyzer.java   |    10 +-
 .../lucene/analysis/bn/BengaliAnalyzer.java     |    17 +-
 .../lucene/analysis/br/BrazilianAnalyzer.java   |     8 +-
 .../lucene/analysis/ca/CatalanAnalyzer.java     |    11 +-
 .../lucene/analysis/ckb/SoraniAnalyzer.java     |     9 +-
 .../commongrams/CommonGramsFilterFactory.java   |     8 +-
 .../lucene/analysis/core/StopAnalyzer.java      |    12 -
 .../lucene/analysis/core/StopFilterFactory.java |     7 +-
 .../lucene/analysis/cz/CzechAnalyzer.java       |    13 +-
 .../lucene/analysis/da/DanishAnalyzer.java      |    10 +-
 .../lucene/analysis/de/GermanAnalyzer.java      |     9 +-
 .../lucene/analysis/el/GreekAnalyzer.java       |     8 +-
 .../lucene/analysis/en/EnglishAnalyzer.java     |    42 +-
 .../lucene/analysis/es/SpanishAnalyzer.java     |    10 +-
 .../lucene/analysis/eu/BasqueAnalyzer.java      |    10 +-
 .../lucene/analysis/fa/PersianAnalyzer.java     |     4 +-
 .../lucene/analysis/fi/FinnishAnalyzer.java     |    10 +-
 .../lucene/analysis/fr/FrenchAnalyzer.java      |    11 +-
 .../lucene/analysis/ga/IrishAnalyzer.java       |     9 +-
 .../lucene/analysis/gl/GalicianAnalyzer.java    |    10 +-
 .../lucene/analysis/hi/HindiAnalyzer.java       |     4 +-
 .../lucene/analysis/hu/HungarianAnalyzer.java   |    10 +-
 .../lucene/analysis/hy/ArmenianAnalyzer.java    |    10 +-
 .../lucene/analysis/id/IndonesianAnalyzer.java  |    10 +-
 .../lucene/analysis/it/ItalianAnalyzer.java     |     9 +-
 .../lucene/analysis/lt/LithuanianAnalyzer.java  |    10 +-
 .../lucene/analysis/lv/LatvianAnalyzer.java     |    10 +-
 .../miscellaneous/ConditionalTokenFilter.java   |    64 +-
 .../miscellaneous/WordDelimiterGraphFilter.java |     1 +
 .../analysis/ngram/EdgeNGramTokenFilter.java    |     6 +
 .../lucene/analysis/ngram/NGramTokenFilter.java |     6 +
 .../lucene/analysis/nl/DutchAnalyzer.java       |    12 +-
 .../lucene/analysis/no/NorwegianAnalyzer.java   |    10 +-
 .../lucene/analysis/pt/PortugueseAnalyzer.java  |    10 +-
 .../lucene/analysis/ro/RomanianAnalyzer.java    |    10 +-
 .../lucene/analysis/ru/RussianAnalyzer.java     |    10 +-
 .../analysis/shingle/FixedShingleFilter.java    |     1 +
 .../analysis/standard/ASCIITLD.jflex-macro      |   263 +-
 .../analysis/standard/ClassicAnalyzer.java      |     4 +-
 .../standard/StandardFilterFactory.java         |    50 -
 .../standard/UAX29URLEmailAnalyzer.java         |    10 +-
 .../standard/UAX29URLEmailTokenizerImpl.java    | 47679 +++++++++--------
 .../standard/UAX29URLEmailTokenizerImpl.jflex   |    26 +-
 .../lucene/analysis/standard/package.html       |     6 +-
 .../lucene/analysis/sv/SwedishAnalyzer.java     |    10 +-
 .../lucene/analysis/tr/TurkishAnalyzer.java     |    10 +-
 .../lucene/analysis/util/UnicodeProps.java      |     6 +-
 ...ache.lucene.analysis.util.TokenFilterFactory |     1 -
 .../lucene/analysis/core/TestAnalyzers.java     |     9 +-
 .../lucene/analysis/core/TestRandomChains.java  |    21 +-
 .../lucene/analysis/core/TestStopAnalyzer.java  |     5 +-
 .../analysis/core/TestStopFilterFactory.java    |     3 +-
 .../TestConditionalTokenFilter.java             |    81 +-
 .../miscellaneous/TestWordDelimiterFilter.java  |    28 +-
 .../TestWordDelimiterGraphFilter.java           |    48 +-
 .../ngram/EdgeNGramTokenFilterTest.java         |    10 +
 .../analysis/ngram/NGramTokenFilterTest.java    |    21 +-
 .../analysis/sinks/TestTeeSinkTokenFilter.java  |    11 +-
 .../standard/TestStandardFactories.java         |     5 -
 .../standard/TestUAX29URLEmailTokenizer.java    |   158 +
 .../lucene/analysis/th/TestThaiAnalyzer.java    |     4 +-
 .../TestWikipediaTokenizerFactory.java          |     6 +-
 .../standard/GenerateJflexTLDMacros.java        |   148 +-
 lucene/analysis/icu/src/data/uax29/Default.rbbi |    46 +-
 .../icu/src/data/utr30/DiacriticFolding.txt     |     4 +
 .../icu/src/data/utr30/NativeDigitFolding.txt   |    20 +
 .../icu/segmentation/BreakIteratorWrapper.java  |    12 +-
 lucene/analysis/icu/src/java/overview.html      |     2 +-
 .../analysis/icu/segmentation/Default.brk       |   Bin 50760 -> 43176 bytes
 .../icu/segmentation/MyanmarSyllable.brk        |   Bin 21272 -> 21808 bytes
 .../org/apache/lucene/analysis/icu/utr30.nrm    |   Bin 59056 -> 59232 bytes
 .../Latin-dont-break-on-hyphens.rbbi            |   187 +-
 .../icu/segmentation/TestICUTokenizer.java      |    15 +
 .../analysis/ja/TestJapaneseAnalyzer.java       |     2 +-
 .../analysis/ja/TestJapaneseTokenizer.java      |    12 +-
 .../analysis/morfologik/MorfologikAnalyzer.java |    11 +-
 .../uk/UkrainianMorfologikAnalyzer.java         |     9 +-
 .../morfologik/TestMorfologikAnalyzer.java      |     4 +-
 .../lucene/analysis/pl/PolishAnalyzer.java      |    10 +-
 .../src/java/org/egothor/stemmer/Trie.java      |    16 +-
 lucene/analysis/uima/build.xml                  |    50 -
 lucene/analysis/uima/ivy.xml                    |    30 -
 .../lucene/analysis/uima/BaseUIMATokenizer.java |    96 -
 .../analysis/uima/UIMAAnnotationsTokenizer.java |    90 -
 .../uima/UIMAAnnotationsTokenizerFactory.java   |    47 -
 .../lucene/analysis/uima/UIMABaseAnalyzer.java  |    44 -
 .../analysis/uima/UIMATypeAwareAnalyzer.java    |    44 -
 .../uima/UIMATypeAwareAnnotationsTokenizer.java |   113 -
 ...IMATypeAwareAnnotationsTokenizerFactory.java |    50 -
 .../lucene/analysis/uima/ae/AEProvider.java     |    34 -
 .../analysis/uima/ae/AEProviderFactory.java     |    76 -
 .../analysis/uima/ae/BasicAEProvider.java       |    87 -
 .../uima/ae/OverridingParamsAEProvider.java     |    69 -
 .../lucene/analysis/uima/ae/package-info.java   |    21 -
 .../lucene/analysis/uima/package-info.java      |    21 -
 lucene/analysis/uima/src/java/overview.html     |    29 -
 ...apache.lucene.analysis.util.TokenizerFactory |    17 -
 .../src/resources/uima/AggregateSentenceAE.xml  |    70 -
 .../test-files/uima/TestAggregateSentenceAE.xml |    55 -
 .../test-files/uima/TestEntityAnnotatorAE.xml   |    66 -
 .../src/test-files/uima/TestPoSTaggerAE.xml     |    44 -
 .../src/test-files/uima/TestWSTokenizerAE.xml   |    78 -
 .../analysis/uima/UIMABaseAnalyzerTest.java     |   137 -
 .../uima/UIMATypeAwareAnalyzerTest.java         |    70 -
 .../analysis/uima/ae/AEProviderFactoryTest.java |    44 -
 .../analysis/uima/ae/BasicAEProviderTest.java   |    36 -
 .../uima/ae/OverridingParamsAEProviderTest.java |    59 -
 .../analysis/uima/an/SampleEntityAnnotator.java |    64 -
 .../analysis/uima/an/SamplePoSTagger.java       |    57 -
 .../uima/an/SampleWSTokenizerAnnotator.java     |    66 -
 .../index/TestBackwardsCompatibility.java       |    11 +-
 .../org/apache/lucene/index/index.7.4.0-cfs.zip |   Bin 0 -> 15484 bytes
 .../apache/lucene/index/index.7.4.0-nocfs.zip   |   Bin 0 -> 15493 bytes
 .../org/apache/lucene/index/sorted.7.4.0.zip    |   Bin 0 -> 158939 bytes
 .../lucene/index/unsupported.6.6.5-cfs.zip      |   Bin 0 -> 15765 bytes
 .../lucene/index/unsupported.6.6.5-nocfs.zip    |   Bin 0 -> 15763 bytes
 .../conf/english-porter-comparison.alg          |     6 +-
 .../lucene/benchmark/byTask/tasks/ReadTask.java |     7 -
 .../byTask/tasks/SearchWithSortTask.java        |     9 -
 .../lucene/benchmark/byTask/utils/Config.java   |     2 +-
 lucene/build.xml                                |     1 -
 .../lucene/classification/BM25NBClassifier.java |     4 +-
 .../classification/KNearestFuzzyClassifier.java |     2 +-
 .../KNearestNeighborClassifier.java             |     2 +-
 lucene/common-build.xml                         |     2 -
 .../org/apache/lucene/analysis/Analyzer.java    |     2 -
 .../lucene/analysis/FilteringTokenFilter.java   |     2 -
 .../lucene/analysis/TokenStreamToAutomaton.java |     8 +-
 .../analysis/standard/StandardAnalyzer.java     |    38 +-
 .../analysis/standard/StandardFilter.java       |    39 -
 .../lucene/analysis/standard/package-info.java  |     1 -
 .../java/org/apache/lucene/geo/GeoUtils.java    |    38 +
 .../src/java/org/apache/lucene/geo/Polygon.java |    49 +-
 .../java/org/apache/lucene/geo/Polygon2D.java   |    22 +-
 .../org/apache/lucene/index/CheckIndex.java     |    13 +
 .../apache/lucene/index/FilterCodecReader.java  |    23 +
 .../apache/lucene/index/FilterLeafReader.java   |     5 -
 .../org/apache/lucene/index/IndexWriter.java    |    65 +-
 .../apache/lucene/index/LeafReaderContext.java  |     4 +-
 .../apache/lucene/index/PendingSoftDeletes.java |     6 +-
 .../org/apache/lucene/index/PostingsEnum.java   |     9 -
 .../org/apache/lucene/index/ReaderPool.java     |    37 +-
 .../apache/lucene/index/SegmentCommitInfo.java  |     4 +-
 .../index/SoftDeletesRetentionMergePolicy.java  |    35 +-
 .../apache/lucene/index/TieredMergePolicy.java  |   605 +-
 .../apache/lucene/search/DocIdSetIterator.java  |    41 +
 .../org/apache/lucene/search/IndexSearcher.java |    32 +-
 .../java/org/apache/lucene/search/Matches.java  |    13 +-
 .../apache/lucene/search/MultiCollector.java    |    14 +
 .../org/apache/lucene/search/QueryRescorer.java |     2 +-
 .../lucene/search/SloppyPhraseMatcher.java      |     4 +-
 .../org/apache/lucene/search/SortRescorer.java  |     2 +-
 .../org/apache/lucene/search/TermQuery.java     |     7 +
 .../java/org/apache/lucene/search/TopDocs.java  |    35 +-
 .../apache/lucene/search/TopDocsCollector.java  |     2 +-
 .../apache/lucene/search/TopFieldCollector.java |    81 +-
 .../org/apache/lucene/search/TopFieldDocs.java  |     5 +-
 .../lucene/search/TopScoreDocCollector.java     |    16 +-
 .../org/apache/lucene/util/PriorityQueue.java   |    11 +-
 .../lucene/util/fst/PositiveIntOutputs.java     |     3 +-
 .../src/test/org/apache/lucene/TestDemo.java    |     4 +-
 .../org/apache/lucene/TestExternalCodecs.java   |    14 +-
 .../lucene/analysis/TestCharArraySet.java       |     4 +-
 .../perfield/TestPerFieldDocValuesFormat.java   |     2 +-
 .../org/apache/lucene/index/Test2BTerms.java    |     2 +-
 .../index/TestBinaryDocValuesUpdates.java       |     5 +-
 .../lucene/index/TestDirectoryReaderReopen.java |     8 +-
 .../apache/lucene/index/TestIndexSorting.java   |     5 +-
 .../apache/lucene/index/TestIndexWriter.java    |    26 +-
 .../lucene/index/TestIndexWriterDelete.java     |     1 +
 .../lucene/index/TestIndexWriterExceptions.java |     9 +-
 .../index/TestIndexWriterWithThreads.java       |    15 +-
 .../apache/lucene/index/TestLogMergePolicy.java |    17 +
 .../org/apache/lucene/index/TestManyFields.java |     2 +-
 .../apache/lucene/index/TestNoMergePolicy.java  |    25 +
 .../index/TestNumericDocValuesUpdates.java      |     1 +
 .../lucene/index/TestPendingSoftDeletes.java    |     2 +
 .../apache/lucene/index/TestReadOnlyIndex.java  |     4 +-
 .../org/apache/lucene/index/TestReaderPool.java |    32 +
 .../TestSoftDeletesDirectoryReaderWrapper.java  |     2 +-
 .../TestSoftDeletesRetentionMergePolicy.java    |    65 +-
 .../lucene/index/TestTieredMergePolicy.java     |   396 +-
 .../index/TestUpgradeIndexMergePolicy.java      |    13 +
 .../lucene/search/MultiCollectorTest.java       |     3 +
 .../org/apache/lucene/search/TestBoolean2.java  |     8 +-
 .../lucene/search/TestConstantScoreQuery.java   |     4 +-
 .../lucene/search/TestDocIdSetIterator.java     |    55 +
 .../lucene/search/TestDoubleValuesSource.java   |     4 +-
 .../lucene/search/TestElevationComparator.java  |     2 +-
 .../lucene/search/TestFieldValueQuery.java      |     6 +-
 .../apache/lucene/search/TestIndexSearcher.java |    36 +-
 .../lucene/search/TestLongValuesSource.java     |     4 +-
 .../lucene/search/TestMultiCollector.java       |    74 +-
 .../lucene/search/TestMultiPhraseQuery.java     |     8 +-
 .../search/TestNormsFieldExistsQuery.java       |     6 +-
 .../apache/lucene/search/TestPhraseQuery.java   |     6 +-
 .../apache/lucene/search/TestPrefixQuery.java   |     2 +-
 .../apache/lucene/search/TestSearchAfter.java   |     9 +-
 .../lucene/search/TestShardSearching.java       |     4 +-
 .../apache/lucene/search/TestSortRandom.java    |     2 +-
 .../search/TestSortedNumericSortField.java      |     4 +-
 .../lucene/search/TestSortedSetSortField.java   |     4 +-
 .../org/apache/lucene/search/TestTermQuery.java |    30 +
 .../lucene/search/TestTopDocsCollector.java     |    22 +-
 .../apache/lucene/search/TestTopDocsMerge.java  |    10 +-
 .../lucene/search/TestTopFieldCollector.java    |   155 +-
 .../TestTopFieldCollectorEarlyTermination.java  |     7 +-
 .../spans/TestSpanMultiTermQueryWrapper.java    |    56 +-
 .../apache/lucene/search/spans/TestSpans.java   |     2 +-
 .../apache/lucene/util/TestPriorityQueue.java   |    19 +-
 .../org/apache/lucene/util/TestSetOnce.java     |    10 +-
 .../org/apache/lucene/util/fst/TestFSTs.java    |     4 +-
 .../lucene/expressions/TestDemoExpressions.java |    10 +-
 .../lucene/expressions/TestExpressionSorts.java |     4 +-
 .../org/apache/lucene/facet/DrillSideways.java  |     6 +-
 .../apache/lucene/facet/FacetsCollector.java    |    21 +-
 .../org/apache/lucene/facet/FacetsConfig.java   |    37 +-
 .../apache/lucene/facet/range/DoubleRange.java  |    16 +
 .../apache/lucene/facet/range/LongRange.java    |    16 +
 .../directory/DirectoryTaxonomyWriter.java      |     5 +
 .../writercache/LruTaxonomyWriterCache.java     |     3 +-
 .../writercache/NameHashIntCacheLRU.java        |     4 +-
 .../writercache/UTF8TaxonomyWriterCache.java    |    30 +-
 .../apache/lucene/facet/TestDrillDownQuery.java |    40 +
 .../apache/lucene/facet/TestDrillSideways.java  |     5 +-
 .../facet/range/TestRangeFacetCounts.java       |    26 +
 .../TestTaxonomyFacetSumValueSource.java        |     2 +-
 .../directory/TestDirectoryTaxonomyWriter.java  |     2 +
 .../TestUTF8TaxonomyWriterCache.java            |    12 +-
 .../search/grouping/BlockGroupingCollector.java |     8 +-
 .../lucene/search/grouping/TopGroups.java       |     6 +-
 .../search/grouping/TopGroupsCollector.java     |    93 +-
 .../lucene/search/uhighlight/PhraseHelper.java  |     4 +-
 .../TestUnifiedHighlighterStrictPhrases.java    |    32 +-
 lucene/ivy-versions.properties                  |    28 +-
 .../lucene/search/join/TestBlockJoin.java       |    20 +-
 .../apache/lucene/search/join/TestJoinUtil.java |     3 +-
 .../join/TestParentChildrenBlockJoinQuery.java  |     1 -
 lucene/licenses/Tagger-2.3.1.jar.sha1           |     1 -
 lucene/licenses/Tagger-LICENSE-ASL.txt          |   202 -
 lucene/licenses/Tagger-NOTICE.txt               |     7 -
 .../licenses/WhitespaceTokenizer-2.3.1.jar.sha1 |     1 -
 .../WhitespaceTokenizer-LICENSE-ASL.txt         |   202 -
 lucene/licenses/WhitespaceTokenizer-NOTICE.txt  |     7 -
 lucene/licenses/commons-compress-1.14.jar.sha1  |     1 -
 .../licenses/commons-compress-1.16.1.jar.sha1   |     1 +
 lucene/licenses/icu4j-61.1.jar.sha1             |     1 -
 lucene/licenses/icu4j-62.1.jar.sha1             |     1 +
 ...jetty-continuation-9.4.10.v20180503.jar.sha1 |     1 -
 ...jetty-continuation-9.4.11.v20180605.jar.sha1 |     1 +
 .../jetty-http-9.4.10.v20180503.jar.sha1        |     1 -
 .../jetty-http-9.4.11.v20180605.jar.sha1        |     1 +
 .../licenses/jetty-io-9.4.10.v20180503.jar.sha1 |     1 -
 .../licenses/jetty-io-9.4.11.v20180605.jar.sha1 |     1 +
 .../jetty-server-9.4.10.v20180503.jar.sha1      |     1 -
 .../jetty-server-9.4.11.v20180605.jar.sha1      |     1 +
 .../jetty-servlet-9.4.10.v20180503.jar.sha1     |     1 -
 .../jetty-servlet-9.4.11.v20180605.jar.sha1     |     1 +
 .../jetty-util-9.4.10.v20180503.jar.sha1        |     1 -
 .../jetty-util-9.4.11.v20180605.jar.sha1        |     1 +
 lucene/licenses/opennlp-tools-1.8.3.jar.sha1    |     1 -
 lucene/licenses/opennlp-tools-1.8.4.jar.sha1    |     1 +
 lucene/licenses/uimaj-core-2.3.1.jar.sha1       |     1 -
 lucene/licenses/uimaj-core-LICENSE-ASL.txt      |   202 -
 lucene/licenses/uimaj-core-NOTICE.txt           |    13 -
 .../search/DiversifiedTopDocsCollector.java     |    16 +-
 .../util/fst/UpToTwoPositiveIntOutputs.java     |     2 +
 lucene/module-build.xml                         |    22 -
 .../queries/function/FunctionScoreQuery.java    |    22 +
 .../function/TestFunctionQueryExplanations.java |     2 +-
 .../function/TestFunctionScoreQuery.java        |    10 +-
 .../function/TestIndexReaderFunctions.java      |     2 +-
 .../queries/function/TestValueSources.java      |     2 +-
 .../queries/payloads/TestPayloadTermQuery.java  |     2 -
 .../document/FloatPointNearestNeighbor.java     |     2 +-
 .../org/apache/lucene/document/LatLonPoint.java |     2 +-
 .../org/apache/lucene/document/LatLonShape.java |   113 +
 .../document/LatLonShapeBoundingBoxQuery.java   |   415 +
 .../java/org/apache/lucene/geo/Tessellator.java |   910 +
 .../src/java/org/apache/lucene/geo/package.html |    31 +
 .../apache/lucene/document/TestLatLonShape.java |   163 +
 .../lucene/document/TestLatLonShapeQueries.java |   278 +
 .../org/apache/lucene/geo/TestTessellator.java  |    45 +
 .../spatial3d/geom/GeoComplexPolygon.java       |     2 +-
 .../spatial3d/geom/GeoDegeneratePath.java       |     2 +-
 .../lucene/spatial3d/geom/GeoStandardPath.java  |     2 +-
 .../org/apache/lucene/spatial3d/geom/Plane.java |    13 +-
 .../apache/lucene/spatial3d/geom/XYZBounds.java |    12 +-
 .../apache/lucene/spatial3d/geom/PlaneTest.java |     2 +-
 .../spatial3d/geom/RandomGeoPolygonTest.java    |     3 +-
 .../analyzing/AnalyzingInfixSuggester.java      |     2 +-
 .../analyzing/SuggestStopFilterFactory.java     |     8 +-
 .../search/suggest/document/TopSuggestDocs.java |     8 +-
 .../document/TopSuggestDocsCollector.java       |     2 +-
 .../jaspell/JaspellTernarySearchTrie.java       |     6 +-
 .../analyzing/AnalyzingSuggesterTest.java       |    31 +-
 .../analyzing/TestSuggestStopFilterFactory.java |     4 +-
 .../document/TestPrefixCompletionQuery.java     |   132 +-
 .../lucene/analysis/ValidatingTokenFilter.java  |     9 +-
 .../java/org/apache/lucene/geo/GeoTestUtil.java |    39 +-
 .../index/BaseDocValuesFormatTestCase.java      |    18 +-
 .../lucene/index/BaseMergePolicyTestCase.java   |   293 +-
 .../apache/lucene/index/RandomIndexWriter.java  |     4 +-
 .../org/apache/lucene/util/LuceneTestCase.java  |     4 +-
 .../java/org/apache/lucene/util/TestUtil.java   |     1 -
 .../lucene/analysis/TestMockAnalyzer.java       |     8 +-
 .../lucene/index/TestAssertingLeafReader.java   |     2 +-
 lucene/tools/forbiddenApis/base.txt             |    18 +
 lucene/tools/junit4/cached-timehints.txt        |  1164 -
 lucene/tools/junit4/tests.policy                |     3 +-
 .../src/groovy/check-source-patterns.groovy     |     4 +
 .../lucene/validation/LibVersionsCheckTask.java |     4 +-
 solr/CHANGES.txt                                |   187 +-
 solr/NOTICE.txt                                 |    14 +-
 solr/bin/solr                                   |    65 +-
 solr/bin/solr.cmd                               |   113 +-
 solr/bin/solr.in.cmd                            |     0
 solr/common-build.xml                           |     3 +-
 .../analytics/util/FacetRangeGenerator.java     |     8 +-
 .../mapping/FillMissingFunctionTest.java        |    18 +-
 .../function/mapping/FilterFunctionTest.java    |    10 +-
 .../function/mapping/IfFunctionTest.java        |    26 +-
 .../function/mapping/RemoveFunctionTest.java    |    22 +-
 .../function/mapping/ReplaceFunctionTest.java   |    22 +-
 .../analytics/legacy/LegacyNoFacetTest.java     |     8 +-
 .../legacy/facet/LegacyFieldFacetTest.java      |    10 +-
 .../legacy/facet/LegacyQueryFacetTest.java      |     6 +-
 .../value/CastingBooleanValueStreamTest.java    |     2 +-
 .../value/CastingBooleanValueTest.java          |     6 +-
 .../value/CastingDoubleValueStreamTest.java     |     2 +-
 .../analytics/value/CastingDoubleValueTest.java |     6 +-
 .../value/CastingFloatValueStreamTest.java      |     2 +-
 .../analytics/value/CastingFloatValueTest.java  |     6 +-
 .../value/CastingIntValueStreamTest.java        |     2 +-
 .../analytics/value/CastingIntValueTest.java    |     6 +-
 .../value/CastingLongValueStreamTest.java       |     2 +-
 .../analytics/value/CastingLongValueTest.java   |     6 +-
 .../solr/analytics/value/ConstantValueTest.java |    24 +-
 .../handler/clustering/ClusteringComponent.java |     2 +-
 .../clustering/src/test-files/log4j2.xml        |    37 -
 .../handler/dataimport/SolrEntityProcessor.java |     2 +-
 .../dataimporthandler/src/test-files/log4j2.xml |    36 -
 .../dataimport/AbstractDIHCacheTestCase.java    |    16 +-
 .../dataimport/TestBuiltInEvaluators.java       |     2 +-
 .../dataimport/TestNumberFormatTransformer.java |    10 +-
 .../dataimport/TestSortedMapBackedCache.java    |    12 +-
 solr/contrib/extraction/ivy.xml                 |     2 +-
 .../handler/extraction/ParseContextConfig.java  |    12 +-
 .../langid/solr/collection1/conf/schema.xml     |     1 -
 .../java/org/apache/solr/ltr/LTRRescorer.java   |     4 +-
 .../org/apache/solr/ltr/model/LinearModel.java  |     2 +-
 .../ltr/model/MultipleAdditiveTreesModel.java   |     4 +-
 .../LTRFeatureLoggerTransformerFactory.java     |     2 +-
 solr/contrib/ltr/src/test-files/log4j2.xml      |    40 -
 .../solr/ltr/TestLTRReRankingPipeline.java      |     6 +-
 ...stFeatureExtractionFromMultipleSegments.java |    36 +-
 .../prometheus-exporter/bin/solr-exporter       |     2 +-
 .../prometheus-exporter/bin/solr-exporter.cmd   |     2 +-
 .../contrib/prometheus-exporter/conf/log4j2.xml |    53 -
 .../prometheus/collector/SolrCollector.java     |    24 +-
 .../solr/prometheus/exporter/SolrExporter.java  |     6 +-
 .../solr/prometheus/scraper/SolrScraper.java    |    10 +-
 .../src/test-files/conf/log4j2.xml              |    35 -
 .../configsets/collection1/conf/managed-schema  |     2 +-
 solr/contrib/uima/README.txt                    |   109 -
 solr/contrib/uima/build.xml                     |    63 -
 solr/contrib/uima/ivy.xml                       |    35 -
 .../uima/processor/FieldMappingException.java   |    27 -
 .../uima/processor/SolrUIMAConfiguration.java   |   117 -
 .../processor/SolrUIMAConfigurationReader.java  |   116 -
 .../solr/uima/processor/UIMAToSolrMapper.java   |    87 -
 .../processor/UIMAUpdateRequestProcessor.java   |   189 -
 .../UIMAUpdateRequestProcessorFactory.java      |    72 -
 .../solr/uima/processor/package-info.java       |    25 -
 solr/contrib/uima/src/java/overview.html        |    21 -
 .../apache/uima/desc/AggregateSentenceAE.xml    |    41 -
 .../org/apache/uima/desc/HmmTagger.xml          |   121 -
 .../apache/uima/desc/OpenCalaisAnnotator.xml    |   194 -
 .../uima/desc/OverridingParamsExtServicesAE.xml |   147 -
 .../desc/TextCategorizationAEDescriptor.xml     |   102 -
 .../desc/TextConceptTaggingAEDescriptor.xml     |   196 -
 .../desc/TextKeywordExtractionAEDescriptor.xml  |   107 -
 .../desc/TextLanguageDetectionAEDescriptor.xml  |   107 -
 .../TextRankedEntityExtractionAEDescriptor.xml  |   403 -
 .../apache/uima/desc/WhitespaceTokenizer.xml    |   115 -
 .../desc/baseAlchemyTypeSystemDescriptor.xml    |    41 -
 .../collection1/conf/aggregate-uima-config.xml  |    48 -
 .../solr/collection1/conf/uima-fields.xml       |     9 -
 .../src/test-files/uima/AggregateSentenceAE.xml |    70 -
 .../test-files/uima/DummyEntityAEDescriptor.xml |    68 -
 .../uima/DummyExceptionAEDescriptor.xml         |    40 -
 .../uima/DummySentimentAnalysisAEDescriptor.xml |    60 -
 .../contrib/uima/src/test-files/uima/TestAE.xml |    72 -
 .../src/test-files/uima/TestExceptionAE.xml     |    54 -
 .../uima/solr/collection1/conf/protwords.txt    |    21 -
 .../uima/solr/collection1/conf/schema.xml       |   612 -
 .../uima/solr/collection1/conf/solrconfig.xml   |   773 -
 .../uima/solr/collection1/conf/spellings.txt    |     2 -
 .../uima/solr/collection1/conf/stopwords.txt    |    58 -
 .../uima/solr/collection1/conf/synonyms.txt     |    31 -
 .../uima/src/test-files/uima/stoptypes.txt      |    25 -
 .../test-files/uima/uima-tokenizers-schema.xml  |   613 -
 .../uima/uima-tokenizers-solrconfig.xml         |   653 -
 .../UIMATokenizersSolrIntegrationTest.java      |    72 -
 .../UIMAUpdateRequestProcessorTest.java         |   186 -
 .../uima/processor/an/DummyEntityAnnotator.java |    47 -
 .../processor/an/DummyExceptionAnnotator.java   |    30 -
 .../processor/an/DummySentimentAnnotator.java   |    60 -
 .../solr/uima/ts/DummySentimentAnnotation.java  |    81 -
 .../uima/ts/DummySentimentAnnotation_Type.java  |    80 -
 .../apache/solr/uima/ts/EntityAnnotation.java   |    99 -
 .../solr/uima/ts/EntityAnnotation_Type.java     |   102 -
 .../src/java/org/apache/solr/api/ApiBag.java    |     6 +-
 .../solrj/embedded/EmbeddedSolrServer.java      |    54 +-
 .../java/org/apache/solr/cloud/Overseer.java    |    33 +-
 .../solr/cloud/OverseerTaskProcessor.java       |     8 +-
 .../org/apache/solr/cloud/RecoveryStrategy.java |    16 +-
 .../org/apache/solr/cloud/SyncStrategy.java     |     3 +-
 .../org/apache/solr/cloud/ZkController.java     |    56 +
 .../cloud/api/collections/AddReplicaCmd.java    |     2 +-
 .../solr/cloud/api/collections/Assign.java      |     2 +-
 .../solr/cloud/api/collections/BackupCmd.java   |     2 +-
 .../api/collections/CreateCollectionCmd.java    |     2 +-
 .../api/collections/MaintainRoutedAliasCmd.java |     2 +-
 .../solr/cloud/api/collections/MigrateCmd.java  |     3 +-
 .../OverseerCollectionMessageHandler.java       |    15 +-
 .../solr/cloud/api/collections/RestoreCmd.java  |    37 +-
 .../cloud/api/collections/SplitShardCmd.java    |   188 +-
 .../cloud/api/collections/TimeRoutedAlias.java  |     2 +
 .../cloud/autoscaling/AutoScalingHandler.java   |     6 +-
 .../cloud/autoscaling/IndexSizeTrigger.java     |     9 +-
 .../cloud/overseer/ClusterStateMutator.java     |     4 +-
 .../solr/cloud/overseer/CollectionMutator.java  |    28 +-
 .../org/apache/solr/core/CoreContainer.java     |    90 +-
 .../apache/solr/core/HdfsDirectoryFactory.java  |     2 +-
 .../src/java/org/apache/solr/core/SolrCore.java |    12 +-
 .../org/apache/solr/handler/IndexFetcher.java   |     2 +-
 .../apache/solr/handler/RequestHandlerBase.java |     3 +
 .../apache/solr/handler/SolrConfigHandler.java  |    11 +-
 .../solr/handler/admin/CollectionsHandler.java  |    55 +-
 .../solr/handler/admin/ConfigSetsHandler.java   |     5 +
 .../solr/handler/admin/CoreAdminOperation.java  |    22 +-
 .../handler/admin/MetricsHistoryHandler.java    |   295 +-
 .../admin/SegmentsInfoRequestHandler.java       |     8 +-
 .../solr/handler/component/ExpandComponent.java |     6 +-
 .../component/QueryElevationComponent.java      |  1312 +-
 .../handler/component/RangeFacetRequest.java    |     8 +-
 .../handler/component/RealTimeGetComponent.java |    33 +-
 .../handler/component/SpatialHeatmapFacets.java |   405 +-
 .../handler/component/TermVectorComponent.java  |     2 +-
 .../apache/solr/handler/loader/JsonLoader.java  |   167 +-
 .../apache/solr/metrics/rrd/SolrRrdBackend.java |    30 +-
 .../solr/metrics/rrd/SolrRrdBackendFactory.java |    76 +-
 .../org/apache/solr/request/SimpleFacets.java   |    27 +-
 .../solr/request/json/JsonQueryConverter.java   |    28 +-
 .../apache/solr/response/CSVResponseWriter.java |     2 +-
 .../solr/response/GraphMLResponseWriter.java    |     5 -
 .../solr/response/JSONResponseWriter.java       |   592 +-
 .../org/apache/solr/response/JSONWriter.java    |   181 +
 .../apache/solr/response/PHPResponseWriter.java |     4 +-
 .../response/PHPSerializedResponseWriter.java   |     2 +-
 .../solr/response/QueryResponseWriterUtil.java  |     2 +-
 .../solr/response/RubyResponseWriter.java       |     4 +-
 .../solr/response/SmileResponseWriter.java      |     4 +-
 .../solr/response/TextResponseWriter.java       |   222 +-
 .../apache/solr/response/WriteableValue.java    |    25 -
 .../transform/BaseEditorialTransformer.java     |    17 +-
 .../transform/ElevatedMarkerFactory.java        |     6 +-
 .../transform/ExcludedMarkerFactory.java        |     6 +-
 .../transform/RawValueTransformerFactory.java   |     9 +-
 .../response/transform/WriteableGeoJSON.java    |     6 +-
 .../apache/solr/schema/AbstractEnumField.java   |   101 +-
 .../solr/schema/FileExchangeRateProvider.java   |   106 +-
 .../org/apache/solr/schema/IndexSchema.java     |     5 +-
 .../solr/schema/LatLonPointSpatialField.java    |     2 +-
 .../apache/solr/search/AbstractReRankQuery.java |     7 +-
 .../solr/search/CollapsingQParserPlugin.java    |    25 +-
 .../apache/solr/search/ExportQParserPlugin.java |     2 +-
 .../java/org/apache/solr/search/Grouping.java   |    33 +-
 .../apache/solr/search/MaxScoreCollector.java   |    55 +
 .../java/org/apache/solr/search/QParser.java    |    30 +-
 .../org/apache/solr/search/ReRankCollector.java |    12 +-
 .../search/SignificantTermsQParserPlugin.java   |     2 +-
 .../apache/solr/search/SolrIndexSearcher.java   |    21 +-
 .../solr/search/facet/FacetFieldMerger.java     |     3 +-
 .../solr/search/facet/FacetFieldProcessor.java  |     7 +
 .../FacetFieldProcessorByEnumTermsStream.java   |     6 +
 .../apache/solr/search/facet/FacetHeatmap.java  |   520 +
 .../apache/solr/search/facet/FacetModule.java   |    29 +-
 .../solr/search/facet/FacetProcessor.java       |    49 +-
 .../apache/solr/search/facet/FacetRange.java    |   386 +-
 .../solr/search/facet/FacetRangeMerger.java     |    68 +-
 .../apache/solr/search/facet/FacetRequest.java  |   150 +-
 .../search/facet/FacetRequestSortedMerger.java  |    29 +-
 .../distributed/command/QueryCommand.java       |    26 +-
 .../distributed/command/QueryCommandResult.java |     8 +-
 .../TopGroupsShardResponseProcessor.java        |    14 +-
 .../TopGroupsResultTransformer.java             |    10 +-
 .../GroupedEndResultTransformer.java            |     4 +-
 ...tContinuesRecorderAuthenticationHandler.java |     2 +-
 .../apache/solr/update/AddUpdateCommand.java    |   204 +-
 .../org/apache/solr/update/CdcrUpdateLog.java   |     2 +-
 .../apache/solr/update/CommitUpdateCommand.java |     2 +-
 .../solr/update/DirectUpdateHandler2.java       |    97 +-
 .../org/apache/solr/update/DocumentBuilder.java |    37 +-
 .../org/apache/solr/update/HdfsUpdateLog.java   |     1 +
 .../java/org/apache/solr/update/PeerSync.java   |   794 +-
 .../apache/solr/update/PeerSyncWithLeader.java  |   372 +
 .../org/apache/solr/update/SolrCoreState.java   |     9 +
 .../org/apache/solr/update/TransactionLog.java  |     1 +
 .../java/org/apache/solr/update/UpdateLog.java  |     9 +
 .../IgnoreLargeDocumentProcessorFactory.java    |    39 +-
 .../processor/NestedUpdateProcessorFactory.java |   137 +
 .../TimeRoutedAliasUpdateProcessor.java         |    32 +-
 .../java/org/apache/solr/util/FastWriter.java   |   157 -
 .../org/apache/solr/util/SafeXMLParsing.java    |   120 +
 .../src/java/org/apache/solr/util/SolrCLI.java  |    13 +-
 .../org/apache/solr/util/TestInjection.java     |    22 +-
 .../collection1/conf/schema-copyfield-test.xml  |     4 -
 .../solr/collection1/conf/schema-hash.xml       |     4 -
 .../conf/schema-luceneMatchVersion.xml          |     2 -
 .../conf/schema-not-required-unique-key.xml     |     1 -
 .../collection1/conf/schema-required-fields.xml |     4 -
 .../solr/collection1/conf/schema-rest.xml       |     3 -
 .../collection1/conf/schema-spellchecker.xml    |     3 -
 .../solr/collection1/conf/schema-sql.xml        |     4 -
 .../test-files/solr/collection1/conf/schema.xml |     6 -
 .../solr/collection1/conf/schema11.xml          |     1 -
 .../solr/collection1/conf/schema12.xml          |     3 -
 .../solr/collection1/conf/schema15.xml          |     6 +-
 .../solr/collection1/conf/schemasurround.xml    |     3 -
 .../conf/solrconfig-update-processor-chains.xml |     5 +
 .../configsets/_default/conf/managed-schema     |     2 +-
 .../configsets/_default/conf/solrconfig.xml     |     2 +
 .../cloud-minimal/conf/solrconfig.xml           |     3 +
 .../test-files/solr/crazy-path-to-schema.xml    |     1 -
 .../org/apache/solr/TestDistributedSearch.java  |    33 +-
 .../apache/solr/cloud/AliasIntegrationTest.java |     3 -
 .../cloud/AssignBackwardCompatibilityTest.java  |     2 +-
 .../solr/cloud/BasicDistributedZkTest.java      |    56 +-
 .../cloud/ChaosMonkeyNothingIsSafeTest.java     |     2 +-
 .../solr/cloud/ChaosMonkeySafeLeaderTest.java   |     2 +-
 ...aosMonkeySafeLeaderWithPullReplicasTest.java |     2 +-
 .../solr/cloud/CollectionsAPISolrJTest.java     |    48 +
 .../solr/cloud/CreateRoutedAliasTest.java       |     4 -
 .../apache/solr/cloud/DeleteReplicaTest.java    |     5 +-
 .../solr/cloud/DocValuesNotIndexedTest.java     |     6 +-
 .../org/apache/solr/cloud/ForceLeaderTest.java  |     2 +-
 .../apache/solr/cloud/HttpPartitionTest.java    |     4 +-
 .../solr/cloud/LIRRollingUpdatesTest.java       |     8 +-
 .../cloud/LeaderElectionIntegrationTest.java    |     2 +-
 .../cloud/LeaderFailoverAfterPartitionTest.java |     2 +-
 .../solr/cloud/LeaderTragicEventTest.java       |   185 +
 .../solr/cloud/LeaderVoteWaitTimeoutTest.java   |     2 +-
 .../solr/cloud/LegacyCloudClusterPropTest.java  |     2 +-
 .../cloud/MetricsHistoryIntegrationTest.java    |     9 +-
 .../solr/cloud/MissingSegmentRecoveryTest.java  |     2 +-
 .../apache/solr/cloud/MoveReplicaHDFSTest.java  |     4 +-
 .../org/apache/solr/cloud/MoveReplicaTest.java  |     2 +-
 .../solr/cloud/OverseerTaskQueueTest.java       |     3 +-
 .../org/apache/solr/cloud/RecoveryZkTest.java   |     1 +
 .../solr/cloud/ReplaceNodeNoTargetTest.java     |     2 +-
 .../solr/cloud/ReplicationFactorTest.java       |     3 +-
 .../apache/solr/cloud/RollingRestartTest.java   |     2 +
 .../solr/cloud/SaslZkACLProviderTest.java       |     1 +
 .../cloud/SegmentTerminateEarlyTestState.java   |    22 +-
 .../cloud/SharedFSAutoReplicaFailoverTest.java  |     2 +-
 .../apache/solr/cloud/SolrCloudExampleTest.java |    12 +-
 .../apache/solr/cloud/TestCloudConsistency.java |     2 +
 .../solr/cloud/TestCloudJSONFacetSKG.java       |    53 +-
 .../apache/solr/cloud/TestCloudPivotFacet.java  |    45 +-
 .../apache/solr/cloud/TestCloudRecovery.java    |     1 -
 .../solr/cloud/TestCloudSearcherWarming.java    |    41 +-
 .../apache/solr/cloud/TestConfigSetsAPI.java    |    58 +-
 .../org/apache/solr/cloud/TestPrepRecovery.java |     4 +-
 .../org/apache/solr/cloud/TestPullReplica.java  |     4 +-
 .../apache/solr/cloud/TestSegmentSorting.java   |     4 +-
 .../cloud/TestSolrCloudWithKerberosAlt.java     |     4 +-
 .../TestStressCloudBlindAtomicUpdates.java      |     2 +-
 .../solr/cloud/UnloadDistributedZkTest.java     |     2 +-
 .../AbstractCloudBackupRestoreTestCase.java     |   139 +-
 .../CollectionsAPIDistributedZkTest.java        |     2 +-
 .../api/collections/CustomCollectionTest.java   |     2 +-
 .../HdfsCollectionsAPIDistributedZkTest.java    |     2 +
 .../cloud/api/collections/ShardSplitTest.java   |   101 +-
 .../api/collections/TestCollectionAPI.java      |   120 +
 .../TestCollectionsAPIViaSolrCloudCluster.java  |     2 +-
 .../collections/TestHdfsCloudBackupRestore.java |     5 +-
 .../AutoAddReplicasIntegrationTest.java         |     2 +-
 .../autoscaling/AutoScalingHandlerTest.java     |     4 +-
 .../autoscaling/ComputePlanActionTest.java      |     4 +-
 .../cloud/autoscaling/IndexSizeTriggerTest.java |    15 +-
 .../cloud/autoscaling/NodeLostTriggerTest.java  |     2 +-
 .../ScheduledTriggerIntegrationTest.java        |     3 +-
 .../cloud/autoscaling/ScheduledTriggerTest.java |     2 +-
 .../autoscaling/SearchRateTriggerTest.java      |     2 +
 .../solr/cloud/autoscaling/TestPolicyCloud.java |    23 +-
 .../cloud/autoscaling/sim/SimCloudManager.java  |    53 +-
 .../sim/SimClusterStateProvider.java            |   112 +-
 .../autoscaling/sim/TestComputePlanAction.java  |     2 +
 .../autoscaling/sim/TestExecutePlanAction.java  |     2 +
 .../sim/TestGenericDistributedQueue.java        |     2 +-
 .../cloud/autoscaling/sim/TestLargeCluster.java |    16 +-
 .../cloud/autoscaling/sim/TestPolicyCloud.java  |     2 +-
 .../sim/TestSimDistributedQueue.java            |     1 +
 .../autoscaling/sim/TestTriggerIntegration.java |     3 +
 .../cloud/cdcr/BaseCdcrDistributedZkTest.java   |     2 +-
 .../solr/cloud/cdcr/CdcrBootstrapTest.java      |     2 +-
 .../cdcr/CdcrReplicationDistributedZkTest.java  |     9 +
 .../cloud/hdfs/HdfsBasicDistributedZkTest.java  |     2 +
 .../solr/cloud/hdfs/HdfsNNFailoverTest.java     |     2 +
 .../hdfs/HdfsRestartWhileUpdatingTest.java      |     3 +-
 .../solr/cloud/hdfs/HdfsSyncSliceTest.java      |     2 +
 ...HdfsTlogReplayBufferedWhileIndexingTest.java |     3 +-
 .../HdfsWriteToMultipleCollectionsTest.java     |     2 +
 .../apache/solr/cloud/hdfs/StressHdfsTest.java  |     2 +-
 .../apache/solr/core/ResourceLoaderTest.java    |     4 +-
 .../org/apache/solr/core/TestCodecSupport.java  |     2 +-
 .../apache/solr/core/TestDynamicLoading.java    |     2 +-
 .../apache/solr/core/TestMergePolicyConfig.java |    16 +-
 .../apache/solr/core/TestSolrConfigHandler.java |    38 +
 .../handler/AnalysisRequestHandlerTestBase.java |     6 +-
 .../DocumentAnalysisRequestHandlerTest.java     |    47 +-
 .../FieldAnalysisRequestHandlerTest.java        |    62 +-
 .../org/apache/solr/handler/JsonLoaderTest.java |   318 +-
 .../solr/handler/TestReplicationHandler.java    |     4 +-
 .../org/apache/solr/handler/TestSQLHandler.java |     2 +-
 .../admin/AutoscalingHistoryHandlerTest.java    |     1 -
 .../admin/MetricsHistoryHandlerTest.java        |     9 +-
 .../solr/handler/admin/TestCollectionAPIs.java  |     8 +-
 .../component/CustomHighlightComponentTest.java |     7 +-
 .../component/QueryElevationComponentTest.java  |   315 +-
 .../component/SpatialHeatmapFacetsTest.java     |   215 -
 .../handler/component/TestPivotHelperCode.java  |     2 +
 .../tagger/EmbeddedSolrNoSerializeTest.java     |    11 +-
 .../solr/index/hdfs/CheckHdfsIndexTest.java     |     2 +
 .../org/apache/solr/legacy/TestLegacyTerms.java |     8 +-
 .../metrics/rrd/SolrRrdBackendFactoryTest.java  |    84 +-
 .../apache/solr/request/SimpleFacetsTest.java   |    12 +-
 .../apache/solr/response/JSONWriterTest.java    |    53 +-
 .../solr/rest/schema/TestFieldTypeResource.java |     1 -
 .../solr/schema/SchemaApiFailureTest.java       |     4 -
 .../apache/solr/schema/TestCloudSchemaless.java |     2 +-
 .../search/CurrencyRangeFacetCloudTest.java     |     9 +-
 .../apache/solr/search/QueryEqualityTest.java   |     3 +-
 .../SignificantTermsQParserPluginTest.java      |    35 +
 .../org/apache/solr/search/TestQueryTypes.java  |     4 +-
 .../apache/solr/search/TestRankQueryPlugin.java |     4 +-
 .../org/apache/solr/search/TestRecovery.java    |     4 +-
 .../apache/solr/search/TestRecoveryHdfs.java    |     2 +
 .../apache/solr/search/TestSolr4Spatial2.java   |     5 +-
 .../test/org/apache/solr/search/TestSort.java   |     3 +-
 .../apache/solr/search/TestStressRecovery.java  |     2 +-
 .../org/apache/solr/search/facet/DebugAgg.java  |     2 +
 .../solr/search/facet/RangeFacetCloudTest.java  |   786 +
 .../search/facet/SpatialHeatmapFacetsTest.java  |   349 +
 .../search/facet/TestJsonFacetRefinement.java   |   137 +-
 .../solr/search/facet/TestJsonFacets.java       |   101 +-
 .../solr/search/json/TestJsonRequest.java       |    11 +-
 .../TestSolrCloudWithHadoopAuthPlugin.java      |     2 +
 .../solr/spelling/SpellCheckCollatorTest.java   |     2 +-
 .../TestHighFrequencyDictionaryFactory.java     |    10 +-
 .../solr/store/blockcache/BlockCacheTest.java   |     1 +
 .../store/blockcache/BlockDirectoryTest.java    |     2 +
 .../solr/store/hdfs/HdfsDirectoryTest.java      |     2 +-
 .../TestDocTermOrdsUninvertLimit.java           |     1 +
 .../solr/uninverting/TestFieldCacheSort.java    |    20 +-
 .../uninverting/TestFieldCacheSortRandom.java   |     4 +-
 .../apache/solr/update/AddBlockUpdateTest.java  |   169 +-
 .../update/DirectUpdateHandlerOptimizeTest.java |   101 -
 .../solr/update/DirectUpdateHandlerTest.java    |     2 +-
 .../solr/update/MaxSizeAutoCommitTest.java      |     4 +
 .../update/PeerSyncWithBufferUpdatesTest.java   |   223 +
 ...ithLeaderAndIndexFingerprintCachingTest.java |    36 +
 .../solr/update/PeerSyncWithLeaderTest.java     |    39 +
 .../apache/solr/update/TestHdfsUpdateLog.java   |     2 +-
 .../solr/update/TestInPlaceUpdatesDistrib.java  |     2 +-
 .../solr/update/TestNestedUpdateProcessor.java  |   195 +
 .../update/processor/AtomicUpdatesTest.java     |    16 +-
 .../FieldMutatingUpdateProcessorTest.java       |    40 +-
 ...ommitOptimizeUpdateProcessorFactoryTest.java |     2 +-
 ...IgnoreLargeDocumentProcessorFactoryTest.java |    97 +-
 .../SignatureUpdateProcessorFactoryTest.java    |     4 +-
 ...atelessScriptUpdateProcessorFactoryTest.java |    12 +-
 .../TimeRoutedAliasUpdateProcessorTest.java     |   228 +-
 .../TrackingUpdateProcessorFactory.java         |   226 +
 .../org/apache/solr/util/CircularListTest.java  |    12 +-
 .../apache/solr/util/SolrPluginUtilsTest.java   |     2 +-
 .../org/apache/solr/util/TestFastWriter.java    |   124 -
 .../apache/solr/util/TestSafeXMLParsing.java    |    99 +
 .../test/org/apache/solr/util/TestUtils.java    |     8 +-
 solr/example/README.txt                         |     4 +-
 solr/example/resources/log4j2.xml               |    76 -
 .../licenses/AlchemyAPIAnnotator-2.3.1.jar.sha1 |     1 -
 .../AlchemyAPIAnnotator-LICENSE-ASL.txt         |   202 -
 solr/licenses/AlchemyAPIAnnotator-NOTICE.txt    |     7 -
 .../licenses/OpenCalaisAnnotator-2.3.1.jar.sha1 |     1 -
 .../OpenCalaisAnnotator-LICENSE-ASL.txt         |   202 -
 solr/licenses/OpenCalaisAnnotator-NOTICE.txt    |     7 -
 solr/licenses/Tagger-2.3.1.jar.sha1             |     1 -
 solr/licenses/Tagger-LICENSE-ASL.txt            |   202 -
 solr/licenses/Tagger-NOTICE.txt                 |     7 -
 .../licenses/WhitespaceTokenizer-2.3.1.jar.sha1 |     1 -
 .../WhitespaceTokenizer-LICENSE-ASL.txt         |   202 -
 solr/licenses/WhitespaceTokenizer-NOTICE.txt    |     7 -
 solr/licenses/commons-compress-1.14.jar.sha1    |     1 -
 solr/licenses/commons-compress-1.16.1.jar.sha1  |     1 +
 solr/licenses/commons-digester-2.1.jar.sha1     |     1 -
 solr/licenses/fontbox-2.0.8.jar.sha1            |     1 -
 solr/licenses/fontbox-2.0.9.jar.sha1            |     1 +
 solr/licenses/icu4j-61.1.jar.sha1               |     1 -
 solr/licenses/icu4j-62.1.jar.sha1               |     1 +
 solr/licenses/jackcess-2.1.10.jar.sha1          |     1 +
 solr/licenses/jackcess-2.1.8.jar.sha1           |     1 -
 solr/licenses/jdom-2.0.2.jar.sha1               |     1 -
 solr/licenses/jdom-LICENSE-BSD_LIKE.txt         |    56 -
 solr/licenses/jdom-NOTICE.txt                   |     6 -
 solr/licenses/jdom2-2.0.6.jar.sha1              |     1 +
 solr/licenses/jdom2-LICENSE-BSD_LIKE.txt        |    56 +
 solr/licenses/jdom2-NOTICE.txt                  |     6 +
 ...jetty-continuation-9.4.10.v20180503.jar.sha1 |     1 -
 ...jetty-continuation-9.4.11.v20180605.jar.sha1 |     1 +
 .../jetty-deploy-9.4.10.v20180503.jar.sha1      |     1 -
 .../jetty-deploy-9.4.11.v20180605.jar.sha1      |     1 +
 .../jetty-http-9.4.10.v20180503.jar.sha1        |     1 -
 .../jetty-http-9.4.11.v20180605.jar.sha1        |     1 +
 .../licenses/jetty-io-9.4.10.v20180503.jar.sha1 |     1 -
 .../licenses/jetty-io-9.4.11.v20180605.jar.sha1 |     1 +
 .../jetty-jmx-9.4.10.v20180503.jar.sha1         |     1 -
 .../jetty-jmx-9.4.11.v20180605.jar.sha1         |     1 +
 .../jetty-rewrite-9.4.10.v20180503.jar.sha1     |     1 -
 .../jetty-rewrite-9.4.11.v20180605.jar.sha1     |     1 +
 .../jetty-security-9.4.10.v20180503.jar.sha1    |     1 -
 .../jetty-security-9.4.11.v20180605.jar.sha1    |     1 +
 .../jetty-server-9.4.10.v20180503.jar.sha1      |     1 -
 .../jetty-server-9.4.11.v20180605.jar.sha1      |     1 +
 .../jetty-servlet-9.4.10.v20180503.jar.sha1     |     1 -
 .../jetty-servlet-9.4.11.v20180605.jar.sha1     |     1 +
 .../jetty-servlets-9.4.10.v20180503.jar.sha1    |     1 -
 .../jetty-servlets-9.4.11.v20180605.jar.sha1    |     1 +
 .../jetty-util-9.4.10.v20180503.jar.sha1        |     1 -
 .../jetty-util-9.4.11.v20180605.jar.sha1        |     1 +
 .../jetty-webapp-9.4.10.v20180503.jar.sha1      |     1 -
 .../jetty-webapp-9.4.11.v20180605.jar.sha1      |     1 +
 .../jetty-xml-9.4.10.v20180503.jar.sha1         |     1 -
 .../jetty-xml-9.4.11.v20180605.jar.sha1         |     1 +
 solr/licenses/jsoup-1.11.2.jar.sha1             |     1 +
 solr/licenses/jsoup-1.8.2.jar.sha1              |     1 -
 solr/licenses/opennlp-tools-1.8.3.jar.sha1      |     1 -
 solr/licenses/opennlp-tools-1.8.4.jar.sha1      |     1 +
 solr/licenses/pdfbox-2.0.8.jar.sha1             |     1 -
 solr/licenses/pdfbox-2.0.9.jar.sha1             |     1 +
 solr/licenses/pdfbox-tools-2.0.8.jar.sha1       |     1 -
 solr/licenses/pdfbox-tools-2.0.9.jar.sha1       |     1 +
 solr/licenses/start.jar.sha1                    |     2 +-
 solr/licenses/tika-core-1.17.jar.sha1           |     1 -
 solr/licenses/tika-core-1.18.jar.sha1           |     1 +
 solr/licenses/tika-java7-1.17.jar.sha1          |     1 -
 solr/licenses/tika-java7-1.18.jar.sha1          |     1 +
 solr/licenses/tika-parsers-1.17.jar.sha1        |     1 -
 solr/licenses/tika-parsers-1.18.jar.sha1        |     1 +
 solr/licenses/tika-xmp-1.17.jar.sha1            |     1 -
 solr/licenses/tika-xmp-1.18.jar.sha1            |     1 +
 solr/licenses/uimaj-core-2.3.1.jar.sha1         |     1 -
 solr/licenses/uimaj-core-LICENSE-ASL.txt        |   202 -
 solr/licenses/uimaj-core-NOTICE.txt             |    13 -
 solr/licenses/xz-1.6.jar.sha1                   |     1 -
 solr/licenses/xz-1.8.jar.sha1                   |     1 +
 solr/server/README.txt                          |     2 +-
 solr/server/resources/log4j2-console.xml        |    39 +
 solr/server/scripts/cloud-scripts/log4j2.xml    |    37 -
 .../scripts/cloud-scripts/snapshotscli.sh       |     2 +-
 solr/server/scripts/cloud-scripts/zkcli.bat     |     4 +-
 solr/server/scripts/cloud-scripts/zkcli.sh      |     2 +-
 .../configsets/_default/conf/managed-schema     |     2 +-
 .../configsets/_default/conf/solrconfig.xml     |     2 +
 solr/solr-ref-guide/src/about-filters.adoc      |     3 +-
 solr/solr-ref-guide/src/analyzers.adoc          |     1 -
 .../src/basic-authentication-plugin.adoc        |     2 +
 solr/solr-ref-guide/src/blockjoin-faceting.adoc |     2 +-
 solr/solr-ref-guide/src/cdcr-config.adoc        |    66 +-
 solr/solr-ref-guide/src/collections-api.adoc    |    21 +-
 solr/solr-ref-guide/src/configsets-api.adoc     |     2 +
 .../solr-ref-guide/src/configuring-logging.adoc |     2 +-
 .../src/distributed-requests.adoc               |    40 +-
 solr/solr-ref-guide/src/enabling-ssl.adoc       |     2 +
 .../solr-ref-guide/src/filter-descriptions.adoc |    15 +-
 solr/solr-ref-guide/src/format-of-solr-xml.adoc |     3 +-
 solr/solr-ref-guide/src/how-to-contribute.adoc  |     4 +-
 .../src/images/diagrams/splitshard-seq.png      |   Bin 0 -> 98871 bytes
 .../src/images/diagrams/splitshard-seq.puml     |   115 +
 solr/solr-ref-guide/src/index-replication.adoc  |     4 +-
 .../src/indexconfig-in-solrconfig.adoc          |     5 +-
 .../src/indexing-and-basic-data-operations.adoc |     4 +-
 solr/solr-ref-guide/src/json-facet-api.adoc     |   363 +-
 solr/solr-ref-guide/src/json-query-dsl.adoc     |    17 +
 solr/solr-ref-guide/src/json-request-api.adoc   |     6 +-
 .../src/kerberos-authentication-plugin.adoc     |     4 +-
 solr/solr-ref-guide/src/language-analysis.adoc  |     2 +-
 solr/solr-ref-guide/src/learning-to-rank.adoc   |     4 +-
 solr/solr-ref-guide/src/managed-resources.adoc  |     2 +-
 solr/solr-ref-guide/src/meta-docs/jekyll.adoc   |     4 +-
 solr/solr-ref-guide/src/meta-docs/publish.adoc  |   165 +-
 solr/solr-ref-guide/src/metrics-history.adoc    |   450 +-
 solr/solr-ref-guide/src/metrics-reporting.adoc  |     2 +-
 solr/solr-ref-guide/src/schemaless-mode.adoc    |     4 +-
 ...tting-up-an-external-zookeeper-ensemble.adoc |    24 +-
 .../src/solr-control-script-reference.adoc      |    85 +-
 solr/solr-ref-guide/src/solr-tutorial.adoc      |     4 +-
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc |    26 +
 .../src/solrcloud-autoscaling-api.adoc          |     2 +-
 ...olrcloud-autoscaling-policy-preferences.adoc |    71 +-
 .../src/solrcloud-autoscaling-triggers.adoc     |    21 +-
 solr/solr-ref-guide/src/spatial-search.adoc     |    18 +-
 solr/solr-ref-guide/src/suggester.adoc          |     1 -
 .../src/the-extended-dismax-query-parser.adoc   |     4 +-
 .../src/the-query-elevation-component.adoc      |    11 +
 solr/solr-ref-guide/src/the-tagger-handler.adoc |   177 +-
 .../solr-ref-guide/src/time-routed-aliases.adoc |    33 +-
 solr/solr-ref-guide/src/tokenizers.adoc         |     2 +-
 solr/solr-ref-guide/src/uima-integration.adoc   |   122 -
 .../src/update-request-processors.adoc          |     6 +-
 .../src/updatehandlers-in-solrconfig.adoc       |     6 +-
 .../src/updating-parts-of-documents.adoc        |    21 +-
 .../src/uploading-data-with-index-handlers.adoc |     8 +-
 ...zookeeper-to-manage-configuration-files.adoc |     2 +
 solr/solr-ref-guide/src/v2-api.adoc             |     2 +-
 .../cloud/autoscaling/AddReplicaSuggester.java  |     5 +-
 .../client/solrj/cloud/autoscaling/Clause.java  |   503 +-
 .../cloud/autoscaling/MoveReplicaSuggester.java |     2 +-
 .../client/solrj/cloud/autoscaling/Operand.java |    93 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |   406 +-
 .../solrj/cloud/autoscaling/PolicyHelper.java   |     2 +-
 .../solrj/cloud/autoscaling/Preference.java     |     2 +-
 .../solrj/cloud/autoscaling/ReplicaCount.java   |    22 +-
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |     2 +
 .../client/solrj/cloud/autoscaling/Row.java     |     2 +-
 .../solrj/cloud/autoscaling/SealedClause.java   |    29 +
 .../solrj/cloud/autoscaling/Suggester.java      |    20 +-
 .../solrj/cloud/autoscaling/Suggestion.java     |   492 +-
 .../solrj/cloud/autoscaling/Violation.java      |    69 +-
 .../solr/client/solrj/impl/CloudSolrClient.java |    33 +-
 .../solrj/impl/SolrClientNodeStateProvider.java |    93 +-
 .../solrj/io/eval/ConvolutionEvaluator.java     |     2 +-
 .../solrj/io/eval/NormalizeEvaluator.java       |     2 +-
 .../client/solrj/io/eval/RankEvaluator.java     |     2 +-
 .../client/solrj/io/eval/ScaleEvaluator.java    |     2 +-
 .../solr/client/solrj/io/stream/EvalStream.java |     5 -
 .../client/solrj/io/stream/PriorityStream.java  |     5 -
 .../solrj/io/stream/SignificantTermsStream.java |     2 +-
 .../solr/client/solrj/io/stream/SolrStream.java |     5 -
 .../client/solrj/io/stream/TopicStream.java     |     5 -
 .../client/solrj/io/stream/TupleStream.java     |     5 -
 .../solrj/request/AbstractUpdateRequest.java    |     3 +-
 .../solrj/request/CollectionAdminRequest.java   |   135 +-
 .../request/ContentStreamUpdateRequest.java     |    26 +-
 .../solrj/request/StreamingUpdateRequest.java   |    76 +
 .../solr/client/solrj/request/V2Request.java    |     7 +-
 .../request/schema/AbstractSchemaRequest.java   |     8 -
 .../solrj/request/schema/SchemaRequest.java     |    86 +-
 .../solrj/response/AnalysisResponseBase.java    |    29 +-
 .../response/DocumentAnalysisResponse.java      |    12 +-
 .../solrj/response/FieldAnalysisResponse.java   |    14 +-
 .../java/org/apache/solr/common/MapWriter.java  |     5 +
 .../org/apache/solr/common/SolrDocument.java    |     1 -
 .../apache/solr/common/SolrDocumentBase.java    |    10 +
 .../apache/solr/common/SolrInputDocument.java   |     8 +-
 .../org/apache/solr/common/SolrInputField.java  |     4 +-
 .../org/apache/solr/common/cloud/Aliases.java   |     6 +-
 .../apache/solr/common/cloud/ZkStateReader.java |    15 +
 .../common/params/CollectionAdminParams.java    |    21 +-
 .../apache/solr/common/params/CommonParams.java |     6 +
 .../common/params/QueryElevationParams.java     |     9 +-
 .../apache/solr/common/params/UpdateParams.java |     2 +-
 .../solr/common/util/ContentStreamBase.java     |     6 +-
 .../org/apache/solr/common/util/FastWriter.java |   157 +
 .../solr/common/util/JsonRecordReader.java      |    32 +-
 .../apache/solr/common/util/JsonTextWriter.java |   499 +
 .../apache/solr/common/util/SolrJSONWriter.java |   114 +
 .../org/apache/solr/common/util/TextWriter.java |   231 +
 .../java/org/apache/solr/common/util/Utils.java |    12 +
 .../apache/solr/common/util/WriteableValue.java |    25 +
 solr/solrj/src/test-files/log4j2.xml            |    39 -
 .../solrj/solr/collection1/conf/schema-sql.xml  |     4 -
 .../solrj/solr/collection1/conf/schema.xml      |     4 -
 .../solr/configsets/shared/conf/schema.xml      |     4 -
 .../solr/configsets/streaming/conf/schema.xml   |     4 -
 .../solrj/solr/crazy-path-to-schema.xml         |     1 -
 ...ollectionAdminRequestRequiredParamsTest.java |     6 +-
 .../solr/client/solrj/SolrExampleTests.java     |    23 +-
 .../solr/client/solrj/SolrExampleTestsBase.java |     2 +-
 .../solrj/cloud/autoscaling/TestPolicy.java     |   609 +-
 .../solrj/io/graph/GraphExpressionTest.java     |    44 +-
 .../solrj/io/stream/MathExpressionTest.java     |     6 +-
 .../solrj/io/stream/StreamExpressionTest.java   |     2 +-
 .../io/stream/eval/NormalizeEvaluatorTest.java  |     6 +-
 .../client/solrj/request/TestV2Request.java     |     8 +-
 .../solrj/response/AnlysisResponseBaseTest.java |    13 +
 .../response/DocumentAnalysisResponseTest.java  |     2 +-
 .../response/FieldAnalysisResponseTest.java     |     2 +-
 .../response/TestDelegationTokenResponse.java   |     2 +-
 .../apache/solr/common/SolrDocumentTest.java    |     8 +-
 .../cloud/TestCollectionStateWatchers.java      |     8 +
 .../solr/common/params/SolrParamTest.java       |     6 +-
 .../apache/solr/common/util/TestFastWriter.java |   125 +
 .../solr/common/util/TestJsonRecordReader.java  |    10 +-
 .../solr/common/util/TestNamedListCodec.java    |     4 +-
 .../solr/common/util/TestSolrJsonWriter.java    |    60 +
 .../apache/solr/common/util/TestTimeSource.java |    12 +-
 .../solr/common/util/TestValidatingJsonMap.java |     2 +-
 .../solr/BaseDistributedSearchTestCase.java     |    17 +-
 .../java/org/apache/solr/SolrTestCaseJ4.java    |    27 +-
 .../cloud/AbstractFullDistribZkTestBase.java    |     2 +-
 .../org/apache/solr/cloud/ConfigRequest.java    |    55 +
 .../apache/solr/cloud/MiniSolrCloudCluster.java |     1 +
 .../solr/core/MockConcurrentMergeScheduler.java |    35 +
 .../apache/solr/core/MockDirectoryFactory.java  |     6 +-
 solr/test-framework/src/test-files/log4j2.xml   |    39 -
 solr/webapp/web/css/angular/cloud.css           |    15 +
 solr/webapp/web/css/angular/common.css          |     4 +-
 solr/webapp/web/css/angular/jquery-ui.min.css   |    28 +
 .../web/css/angular/jquery-ui.structure.min.css |    24 +
 solr/webapp/web/index.html                      |     3 +
 solr/webapp/web/js/angular/controllers/cloud.js |    64 +-
 .../web/js/angular/controllers/segments.js      |     2 +-
 solr/webapp/web/libs/jquery-ui.min.js           |    30 +
 solr/webapp/web/partials/cloud.html             |     1 +
 949 files changed, 44281 insertions(+), 43922 deletions(-)
----------------------------------------------------------------------




[17/43] lucene-solr:jira/http2: SOLR-11943: Fix RefGuide for latlonVectors and haversineMeters functions.

Posted by da...@apache.org.
SOLR-11943: Fix RefGuide for latlonVectors and haversineMeters functions.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/70f00191
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/70f00191
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/70f00191

Branch: refs/heads/jira/http2
Commit: 70f001918ebaacbc83b1a7b2c0209b6f37a80813
Parents: e5232f6
Author: Joel Bernstein <jb...@apache.org>
Authored: Sun Sep 9 20:59:13 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Sun Sep 9 20:59:13 2018 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/math-expressions.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/70f00191/solr/solr-ref-guide/src/math-expressions.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/math-expressions.adoc b/solr/solr-ref-guide/src/math-expressions.adoc
index de150cf..3974989 100644
--- a/solr/solr-ref-guide/src/math-expressions.adoc
+++ b/solr/solr-ref-guide/src/math-expressions.adoc
@@ -38,7 +38,7 @@ record in your Solr Cloud cluster computable.
 
 *<<matrix-math.adoc#matrix-math,Matrix Math>>*: Matrix creation, manipulation, and matrix math.
 
-*<<vectorization.adoc#vectorization,Streams and Vectorization>>*: Retrieving streams and vectorizing numeric and lat/long point fields.
+*<<vectorization.adoc#vectorization,Streams and Vectorization>>*: Retrieving streams and vectorizing numeric and lat/lon location fields.
 
 *<<term-vectors.adoc#term-vectors,Text Analysis and Term Vectors>>*: Using math expressions for text analysis and TF-IDF term vectors.
 


[02/43] lucene-solr:jira/http2: [LUCENE-8343] documentation fix

Posted by da...@apache.org.
[LUCENE-8343] documentation fix


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/17cfa634
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/17cfa634
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/17cfa634

Branch: refs/heads/jira/http2
Commit: 17cfa634798f96539c2535dca2e9a8f2cc0bff45
Parents: e83e8ee
Author: Alessandro Benedetti <a....@sease.io>
Authored: Wed Jun 6 19:42:08 2018 +0100
Committer: Alessandro Benedetti <a....@sease.io>
Committed: Wed Jun 6 19:42:08 2018 +0100

----------------------------------------------------------------------
 solr/solr-ref-guide/src/suggester.adoc | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/17cfa634/solr/solr-ref-guide/src/suggester.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/suggester.adoc b/solr/solr-ref-guide/src/suggester.adoc
index 0f9e12e..7156dea 100644
--- a/solr/solr-ref-guide/src/suggester.adoc
+++ b/solr/solr-ref-guide/src/suggester.adoc
@@ -189,7 +189,7 @@ This implementation supports <<Context Filtering>>.
 
 ==== BlendedInfixLookupFactory
 
-An extension of the `AnalyzingInfixSuggester` which provides additional functionality to weight prefix matches across the matched documents. You can tell it to score higher if a hit is closer to the start of the suggestion or vice versa.
+An extension of the `AnalyzingInfixSuggester` which provides additional functionality to weight prefix matches across the matched documents. It scores higher if a hit is closer to the start of the suggestion.
 
 This implementation uses the following additional properties:
 
@@ -198,9 +198,11 @@ Used to calculate weight coefficient using the position of the first matching wo
 `position_linear`:::
 `weightFieldValue * (1 - 0.10*position)`: Matches to the start will be given a higher score. This is the default.
 `position_reciprocal`:::
-`weightFieldValue / (1 + position)`: Matches to the end will be given a higher score.
+`weightFieldValue / (1 + position)`: Matches to the start will be given a score which decays faster than linear.
+`position_exponential_reciprocal`:::
+`weightFieldValue / pow(1 + position, exponent)`: Matches to the start will be given a score which decays faster than reciprocal.
 `exponent`::::
-An optional configuration variable for `position_reciprocal` to control how fast the score will increase or decrease. Default `2.0`.
+An optional configuration variable for `position_exponential_reciprocal` to control how fast the score will decrease. Default `2.0`.
 
 `numFactor`::
 The factor to multiply the number of searched elements from which results will be pruned. Default is `10`.
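
For reference, a minimal standalone sketch of how the three blender types documented above map the position of the first matching token to a score. The class and method names here are illustrative only, not the shipped BlendedInfixSuggester code; the formulas follow the documentation in the hunk above.

    // Illustrative sketch, not the actual suggester implementation.
    enum BlenderType { POSITION_LINEAR, POSITION_RECIPROCAL, POSITION_EXPONENTIAL_RECIPROCAL }

    final class BlenderSketch {
      static double blendedScore(double weightFieldValue, int position,
                                 BlenderType type, double exponent) {
        switch (type) {
          case POSITION_LINEAR:
            return weightFieldValue * (1 - 0.10 * position);            // the default
          case POSITION_RECIPROCAL:
            return weightFieldValue / (1 + position);                   // decays faster than linear
          case POSITION_EXPONENTIAL_RECIPROCAL:
            return weightFieldValue / Math.pow(1 + position, exponent); // decays faster than reciprocal
          default:
            throw new AssertionError(type);
        }
      }

      public static void main(String[] args) {
        // A match at position 2 with weight 10 and the default exponent 2.0:
        System.out.println(blendedScore(10, 2, BlenderType.POSITION_LINEAR, 2.0));                 // 8.0
        System.out.println(blendedScore(10, 2, BlenderType.POSITION_RECIPROCAL, 2.0));             // ~3.33
        System.out.println(blendedScore(10, 2, BlenderType.POSITION_EXPONENTIAL_RECIPROCAL, 2.0)); // ~1.11
      }
    }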


[24/43] lucene-solr:jira/http2: SOLR-12759: Disable ExtractingRequestHandlerTest on JDK 11 due to JDK bug with timezone locale and Date.toString

Posted by da...@apache.org.
SOLR-12759: Disable ExtractingRequestHandlerTest on JDK 11 due to
JDK bug with timezone locale and Date.toString


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/623cdf29
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/623cdf29
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/623cdf29

Branch: refs/heads/jira/http2
Commit: 623cdf29ad9f59b93e15184c13f99275cf231a14
Parents: a537aa2
Author: David Smiley <ds...@apache.org>
Authored: Mon Sep 10 11:45:44 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Mon Sep 10 11:45:44 2018 -0400

----------------------------------------------------------------------
 .../solr/handler/extraction/ExtractingRequestHandlerTest.java      | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/623cdf29/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
index 3241210..5a76a0b 100644
--- a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
+++ b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java
@@ -41,6 +41,8 @@ public class ExtractingRequestHandlerTest extends SolrTestCaseJ4 {
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    assertFalse("SOLR-12759 JDK 11 (1st release) and Tika 1.x can result in extracting dates in a bad format.",
+        System.getProperty("java.version").startsWith("11"));
     initCore("solrconfig.xml", "schema.xml", getFile("extraction/solr").getAbsolutePath());
   }
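
For comparison, a hedged sketch of the Assume-based variant of this kind of guard, which skips rather than fails a test class on JDK 11. The class name below is illustrative; org.junit.Assume is the standard JUnit 4 API used throughout the Lucene/Solr test framework.

    import org.junit.Assume;
    import org.junit.BeforeClass;

    public class SkipOnJdk11Example {
      @BeforeClass
      public static void beforeClass() {
        // Skips (rather than fails) every test in the class when running on JDK 11.
        Assume.assumeFalse("SOLR-12759: Tika 1.x can extract dates in a bad format on JDK 11",
            System.getProperty("java.version").startsWith("11"));
      }
    }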
 


[25/43] lucene-solr:jira/http2: SOLR-12762: Fix javadoc for SolrCloudTestCase.clusterShape() method and add a method that validates only against Active slices while testing

Posted by da...@apache.org.
SOLR-12762: Fix javadoc for SolrCloudTestCase.clusterShape() method and add a method that validates only against Active slices while testing


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a1b6db26
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a1b6db26
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a1b6db26

Branch: refs/heads/jira/http2
Commit: a1b6db26db5e03a31492549a181c285f9b35c9a2
Parents: 623cdf2
Author: Anshum Gupta <an...@apache.org>
Authored: Mon Sep 10 14:20:07 2018 -0700
Committer: Anshum Gupta <an...@apache.org>
Committed: Mon Sep 10 15:18:32 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  6 ++-
 .../apache/solr/cloud/SolrCloudTestCase.java    | 40 +++++++++++++++-----
 2 files changed, 35 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a1b6db26/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 0d71204..f9e5b56 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -84,7 +84,11 @@ Apache ZooKeeper 3.4.11
 Jetty 9.4.11.v20180605
 
 
-(No Changes)
+Other Changes
+----------------------
+
+* SOLR-12762: Fix javadoc for SolrCloudTestCase.clusterShape() method and add a method that validates only against
+  Active slices (Anshum Gupta)
 
 
 ==================  7.5.0 ==================

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a1b6db26/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
index 1b5f67b..bd041f0 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
@@ -283,7 +283,7 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
 
   /**
    * Return a {@link CollectionStatePredicate} that returns true if a collection has the expected
-   * number of active shards and active replicas
+   * number of shards and active replicas
    */
   public static CollectionStatePredicate clusterShape(int expectedShards, int expectedReplicas) {
     return (liveNodes, collectionState) -> {
@@ -291,20 +291,40 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
         return false;
       if (collectionState.getSlices().size() != expectedShards)
         return false;
-      for (Slice slice : collectionState) {
-        int activeReplicas = 0;
-        for (Replica replica : slice) {
-          if (replica.isActive(liveNodes))
-            activeReplicas++;
-        }
-        if (activeReplicas != expectedReplicas)
-          return false;
-      }
+      if (compareActiveReplicaCountsForShards(expectedReplicas, liveNodes, collectionState)) return false;
       return true;
     };
   }
 
   /**
+   * Return a {@link CollectionStatePredicate} that returns true if a collection has the expected
+   * number of active shards and active replicas
+   */
+  public static CollectionStatePredicate activeClusterShape(int expectedShards, int expectedReplicas) {
+    return (liveNodes, collectionState) -> {
+      if (collectionState == null)
+        return false;
+      if (collectionState.getActiveSlices().size() != expectedShards)
+        return false;
+      if (compareActiveReplicaCountsForShards(expectedReplicas, liveNodes, collectionState)) return false;
+      return true;
+    };
+  }
+
+  private static boolean compareActiveReplicaCountsForShards(int expectedReplicas, Set<String> liveNodes, DocCollection collectionState) {
+    for (Slice slice : collectionState) {
+      int activeReplicas = 0;
+      for (Replica replica : slice) {
+        if (replica.isActive(liveNodes))
+          activeReplicas++;
+      }
+      if (activeReplicas != expectedReplicas)
+        return true;
+    }
+    return false;
+  }
+
+  /**
    * Get a (reproducibly) random shard from a {@link DocCollection}
    */
   protected static Slice getRandomShard(DocCollection collection) {
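
A minimal usage sketch for the new predicate, assuming a SolrCloudTestCase subclass with a cluster and collection already set up; the collection name and shard/replica counts are illustrative.

    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.junit.Test;

    public class ActiveClusterShapeUsage extends SolrCloudTestCase {
      @Test
      public void waitsForActiveShape() throws Exception {
        // Blocks until "testCollection" has exactly 2 active slices, each with
        // 2 replicas active on live nodes, per activeClusterShape() above.
        waitForState("Expected 2 active shards with 2 active replicas each",
            "testCollection", activeClusterShape(2, 2));
      }
    }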


[08/43] lucene-solr:jira/http2: LUCENE-8483: Scorer cannot have a null Weight

Posted by da...@apache.org.
LUCENE-8483: Scorer cannot have a null Weight


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/66c671ea
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/66c671ea
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/66c671ea

Branch: refs/heads/jira/http2
Commit: 66c671ea80f81596dad6d2e7745328f31f530cb8
Parents: 0dc66c2
Author: Alan Woodward <ro...@apache.org>
Authored: Tue Sep 4 13:24:32 2018 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Fri Sep 7 11:41:47 2018 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  2 +
 lucene/MIGRATE.txt                              |  5 ++
 .../java/org/apache/lucene/index/Sorter.java    | 27 -------
 .../lucene/search/Boolean2ScorerSupplier.java   |  4 +-
 .../org/apache/lucene/search/BooleanScorer.java | 14 ++--
 .../org/apache/lucene/search/BooleanWeight.java |  2 +-
 .../apache/lucene/search/CachingCollector.java  | 16 +---
 .../org/apache/lucene/search/FakeScorer.java    | 62 ---------------
 .../apache/lucene/search/MatchAllDocsQuery.java |  2 +-
 .../org/apache/lucene/search/ScoreAndDoc.java   | 35 +++++++++
 .../java/org/apache/lucene/search/Scorer.java   |  7 +-
 .../org/apache/lucene/search/SortRescorer.java  | 10 +--
 .../lucene/search/MultiCollectorTest.java       | 20 ++---
 .../search/TestBoolean2ScorerSupplier.java      | 80 ++++++++++++++------
 .../org/apache/lucene/search/TestBooleanOr.java |  2 +-
 .../apache/lucene/search/TestBooleanScorer.java |  2 +-
 .../lucene/search/TestCachingCollector.java     | 23 ++----
 .../lucene/search/TestConjunctionDISI.java      | 50 +++++++++---
 .../lucene/search/TestConstantScoreQuery.java   |  4 +-
 .../search/TestMaxScoreSumPropagator.java       | 34 ++++++++-
 .../lucene/search/TestMultiCollector.java       | 16 +---
 .../apache/lucene/search/TestQueryRescorer.java |  2 +-
 .../lucene/search/TestTopDocsCollector.java     | 22 +-----
 .../lucene/search/TestTopFieldCollector.java    |  2 +-
 .../apache/lucene/expressions/FakeScorer.java   | 53 -------------
 .../lucene/facet/DrillSidewaysScorer.java       | 24 +-----
 .../search/grouping/BlockGroupingCollector.java | 19 ++++-
 .../lucene/search/grouping/FakeScorer.java      | 52 -------------
 .../apache/lucene/search/join/FakeScorer.java   | 52 -------------
 .../queries/function/FunctionRangeQuery.java    |  4 +-
 .../lucene/queries/function/FunctionValues.java | 15 ++--
 .../lucene/queries/function/ValueSource.java    | 26 ++-----
 .../queries/function/ValueSourceScorer.java     |  8 +-
 .../docvalues/DocTermsIndexDocValues.java       |  5 +-
 .../function/docvalues/DoubleDocValues.java     | 11 +--
 .../function/docvalues/IntDocValues.java        |  5 +-
 .../function/docvalues/LongDocValues.java       |  5 +-
 .../function/valuesource/EnumFieldSource.java   |  5 +-
 .../solr/handler/component/QueryComponent.java  | 31 +-------
 .../solr/search/CollapsingQParserPlugin.java    | 29 ++-----
 .../src/java/org/apache/solr/search/Filter.java |  2 +-
 .../apache/solr/search/FunctionRangeQuery.java  | 12 ++-
 .../search/function/ValueSourceRangeFilter.java |  9 ++-
 .../apache/solr/search/TestRankQueryPlugin.java | 32 +-------
 44 files changed, 309 insertions(+), 533 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index f344b82..07163b3 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -76,6 +76,8 @@ API Changes
 * LUCENE-8475: Deprecated constants have been removed from RamUsageEstimator.
   (Dimitrios Athanasiou)
 
+* LUCENE-8483: Scorers may no longer take null as a Weight (Alan Woodward)
+
 Changes in Runtime Behavior
 
 * LUCENE-8333: Switch MoreLikeThis.setMaxDocFreqPct to use maxDoc instead of

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/MIGRATE.txt
----------------------------------------------------------------------
diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt
index f09751e..90ed4fc 100644
--- a/lucene/MIGRATE.txt
+++ b/lucene/MIGRATE.txt
@@ -113,3 +113,8 @@ Scorer has a number of methods that should never be called from Collectors, for
 those that advance the underlying iterators.  To hide these, LeafCollector.setScorer()
 now takes a Scorable, an abstract class that Scorers can extend, with methods
 docId() and score() (LUCENE-6228)
+
+## Scorers must have non-null Weights ##
+
+If a custom Scorer implementation does not have an associated Weight, it can probably
+be replaced with a Scorable instead.
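
A hedged migration sketch for downstream code affected by this change: a weight-less helper that used to extend Scorer with super(null) can extend Scorable directly, as the ScoreAndDoc class added later in this patch does. The class name below is illustrative.

    import org.apache.lucene.search.Scorable;

    final class ConstantScorable extends Scorable {
      private final float score;
      private int doc = -1;

      ConstantScorable(float score) { this.score = score; }

      void setDoc(int doc) { this.doc = doc; } // the caller advances the doc id

      @Override public int docID() { return doc; }

      @Override public float score() { return score; }
    }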

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/index/Sorter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/Sorter.java b/lucene/core/src/java/org/apache/lucene/index/Sorter.java
index c47f9a1..a3718c2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/Sorter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/Sorter.java
@@ -20,9 +20,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Comparator;
 
-import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldComparator;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSelector;
@@ -445,30 +443,5 @@ final class Sorter {
   public String toString() {
     return getID();
   }
-
-  static final Scorer FAKESCORER = new Scorer(null) {
-
-    float score;
-    int doc = -1;
-
-    @Override
-    public int docID() {
-      return doc;
-    }
-
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public float score() throws IOException {
-      return score;
-    }
-
-    @Override
-    public float getMaxScore(int upTo) throws IOException {
-      return Float.POSITIVE_INFINITY;
-    }
-  };
   
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
index a13547f..e50eec4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
@@ -30,13 +30,13 @@ import org.apache.lucene.search.BooleanClause.Occur;
 
 final class Boolean2ScorerSupplier extends ScorerSupplier {
 
-  private final BooleanWeight weight;
+  private final Weight weight;
   private final Map<BooleanClause.Occur, Collection<ScorerSupplier>> subs;
   private final ScoreMode scoreMode;
   private final int minShouldMatch;
   private long cost = -1;
 
-  Boolean2ScorerSupplier(BooleanWeight weight,
+  Boolean2ScorerSupplier(Weight weight,
       Map<Occur, Collection<ScorerSupplier>> subs,
       ScoreMode scoreMode, int minShouldMatch) {
     if (minShouldMatch < 0) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
index a66305b..6ec17ba 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
@@ -119,7 +119,7 @@ final class BooleanScorer extends BulkScorer {
   final BulkScorerAndDoc[] leads;
   final HeadPriorityQueue head;
   final TailPriorityQueue tail;
-  final FakeScorer fakeScorer = new FakeScorer();
+  final ScoreAndDoc scoreAndDoc = new ScoreAndDoc();
   final int minShouldMatch;
   final long cost;
 
@@ -178,12 +178,12 @@ final class BooleanScorer extends BulkScorer {
   }
 
   private void scoreDocument(LeafCollector collector, int base, int i) throws IOException {
-    final FakeScorer fakeScorer = this.fakeScorer;
+    final ScoreAndDoc scoreAndDoc = this.scoreAndDoc;
     final Bucket bucket = buckets[i];
     if (bucket.freq >= minShouldMatch) {
-      fakeScorer.score = (float) bucket.score;
+      scoreAndDoc.score = (float) bucket.score;
       final int doc = base | i;
-      fakeScorer.doc = doc;
+      scoreAndDoc.doc = doc;
       collector.collect(doc);
     }
     bucket.freq = 0;
@@ -276,7 +276,7 @@ final class BooleanScorer extends BulkScorer {
     bulkScorer.score(collector, acceptDocs, windowMin, end);
 
     // reset the scorer that should be used for the general case
-    collector.setScorer(fakeScorer);
+    collector.setScorer(scoreAndDoc);
   }
 
   private BulkScorerAndDoc scoreWindow(BulkScorerAndDoc top, LeafCollector collector,
@@ -307,8 +307,8 @@ final class BooleanScorer extends BulkScorer {
 
   @Override
   public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
-    fakeScorer.doc = -1;
-    collector.setScorer(fakeScorer);
+    scoreAndDoc.doc = -1;
+    collector.setScorer(scoreAndDoc);
 
     BulkScorerAndDoc top = advance(min);
     while (top.next < max) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
index d58b246..6298513 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
@@ -160,7 +160,7 @@ final class BooleanWeight extends Weight {
       @Override
       public int score(final LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
         final LeafCollector noScoreCollector = new LeafCollector() {
-          FakeScorer fake = new FakeScorer();
+          ScoreAndDoc fake = new ScoreAndDoc();
 
           @Override
           public void setScorer(Scorable scorer) throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index f2da14b..273ece4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -47,7 +47,7 @@ public abstract class CachingCollector extends FilterCollector {
 
   private static final int INITIAL_ARRAY_SIZE = 128;
 
-  private static final class CachedScorer extends Scorer {
+  private static final class CachedScorable extends Scorable {
 
     // NOTE: these members are package-private b/c that way accessing them from
     // the outer class does not incur access check by the JVM. The same
@@ -56,22 +56,10 @@ public abstract class CachingCollector extends FilterCollector {
     int doc;
     float score;
 
-    private CachedScorer() { super(null); }
-
-    @Override
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException();
-    }
-
     @Override
     public final float score() { return score; }
 
     @Override
-    public float getMaxScore(int upTo) throws IOException {
-      return Float.POSITIVE_INFINITY;
-    }
-
-    @Override
     public int docID() {
       return doc;
     }
@@ -188,7 +176,7 @@ public abstract class CachingCollector extends FilterCollector {
       final int[] docs = this.docs.get(i);
       final float[] scores = this.scores.get(i);
       assert docs.length == scores.length;
-      final CachedScorer scorer = new CachedScorer();
+      final CachedScorable scorer = new CachedScorable();
       collector.setScorer(scorer);
       for (int j = 0; j < docs.length; ++j) {
         scorer.doc = docs[j];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
deleted file mode 100644
index 271833a..0000000
--- a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-
-import java.io.IOException;
-import java.util.Collection;
-
-/** Used by {@link BulkScorer}s that need to pass a {@link
- *  Scorer} to {@link LeafCollector#setScorer}. */
-final class FakeScorer extends Scorer {
-  float score;
-  int doc = -1;
-
-  public FakeScorer() {
-    super(null);
-  }
-
-  @Override
-  public int docID() {
-    return doc;
-  }
-
-  @Override
-  public float score() {
-    return score;
-  }
-
-  @Override
-  public float getMaxScore(int upTo) throws IOException {
-    return Float.POSITIVE_INFINITY;
-  }
-
-  @Override
-  public DocIdSetIterator iterator() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Weight getWeight() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Collection<ChildScorable> getChildren() {
-    throw new UnsupportedOperationException();
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
index 89b2997..7094bdf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
@@ -53,7 +53,7 @@ public final class MatchAllDocsQuery extends Query {
           @Override
           public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
             max = Math.min(max, maxDoc);
-            FakeScorer scorer = new FakeScorer();
+            ScoreAndDoc scorer = new ScoreAndDoc();
             scorer.score = score;
             collector.setScorer(scorer);
             for (int doc = min; doc < max; ++doc) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/ScoreAndDoc.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ScoreAndDoc.java b/lucene/core/src/java/org/apache/lucene/search/ScoreAndDoc.java
new file mode 100644
index 0000000..f9610f8
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/ScoreAndDoc.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+
+/** Used by {@link BulkScorer}s that need to pass a {@link
+ *  Scorable} to {@link LeafCollector#setScorer}. */
+final class ScoreAndDoc extends Scorable {
+  float score;
+  int doc = -1;
+
+  @Override
+  public int docID() {
+    return doc;
+  }
+
+  @Override
+  public float score() {
+    return score;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/Scorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/Scorer.java b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
index aef2168..47baefb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Scorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
@@ -18,6 +18,7 @@ package org.apache.lucene.search;
 
 
 import java.io.IOException;
+import java.util.Objects;
 
 /**
  * Expert: Common scoring functionality for different types of queries.
@@ -38,8 +39,8 @@ import java.io.IOException;
  * with these scores.
  */
 public abstract class Scorer extends Scorable {
-  /** the Scorer's parent Weight. in some cases this may be null */
-  // TODO can we clean this up?
+
+  /** the Scorer's parent Weight */
   protected final Weight weight;
 
   /**
@@ -47,7 +48,7 @@ public abstract class Scorer extends Scorable {
    * @param weight The scorers <code>Weight</code>.
    */
   protected Scorer(Weight weight) {
-    this.weight = weight;
+    this.weight = Objects.requireNonNull(weight);
   }
 
   /** returns parent Weight

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java b/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
index ff8f46a..982813d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortRescorer.java
@@ -59,7 +59,7 @@ public class SortRescorer extends Rescorer {
     int docBase = 0;
 
     LeafCollector leafCollector = null;
-    FakeScorer fakeScorer = new FakeScorer();
+    ScoreAndDoc scoreAndDoc = new ScoreAndDoc();
 
     while (hitUpto < hits.length) {
       ScoreDoc hit = hits[hitUpto];
@@ -74,14 +74,14 @@ public class SortRescorer extends Rescorer {
       if (readerContext != null) {
         // We advanced to another segment:
         leafCollector = collector.getLeafCollector(readerContext);
-        leafCollector.setScorer(fakeScorer);
+        leafCollector.setScorer(scoreAndDoc);
         docBase = readerContext.docBase;
       }
 
-      fakeScorer.score = hit.score;
-      fakeScorer.doc = docID - docBase;
+      scoreAndDoc.score = hit.score;
+      scoreAndDoc.doc = docID - docBase;
 
-      leafCollector.collect(fakeScorer.doc);
+      leafCollector.collect(scoreAndDoc.doc);
 
       hitUpto++;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java b/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java
index de86924..c3b4f42 100644
--- a/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/MultiCollectorTest.java
@@ -70,7 +70,7 @@ public class MultiCollectorTest extends LuceneTestCase {
     final LeafCollector ac = c.getLeafCollector(null);
     ac.collect(1);
     c.getLeafCollector(null);
-    c.getLeafCollector(null).setScorer(new FakeScorer());
+    c.getLeafCollector(null).setScorer(new ScoreAndDoc());
   }
 
   @Test
@@ -92,7 +92,7 @@ public class MultiCollectorTest extends LuceneTestCase {
     LeafCollector ac = c.getLeafCollector(null);
     ac.collect(1);
     ac = c.getLeafCollector(null);
-    ac.setScorer(new FakeScorer());
+    ac.setScorer(new ScoreAndDoc());
 
     for (DummyCollector dc : dcs) {
       assertTrue(dc.collectCalled);
@@ -142,23 +142,23 @@ public class MultiCollectorTest extends LuceneTestCase {
     final LeafReaderContext ctx = reader.leaves().get(0);
 
     expectThrows(AssertionError.class, () -> {
-      collector(ScoreMode.COMPLETE_NO_SCORES, ScoreCachingWrappingScorer.class).getLeafCollector(ctx).setScorer(new FakeScorer());
+      collector(ScoreMode.COMPLETE_NO_SCORES, ScoreCachingWrappingScorer.class).getLeafCollector(ctx).setScorer(new ScoreAndDoc());
     });
 
     // no collector needs scores => no caching
-    Collector c1 = collector(ScoreMode.COMPLETE_NO_SCORES, FakeScorer.class);
-    Collector c2 = collector(ScoreMode.COMPLETE_NO_SCORES, FakeScorer.class);
-    MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new FakeScorer());
+    Collector c1 = collector(ScoreMode.COMPLETE_NO_SCORES, ScoreAndDoc.class);
+    Collector c2 = collector(ScoreMode.COMPLETE_NO_SCORES, ScoreAndDoc.class);
+    MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new ScoreAndDoc());
 
     // only one collector needs scores => no caching
-    c1 = collector(ScoreMode.COMPLETE, FakeScorer.class);
-    c2 = collector(ScoreMode.COMPLETE_NO_SCORES, FakeScorer.class);
-    MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new FakeScorer());
+    c1 = collector(ScoreMode.COMPLETE, ScoreAndDoc.class);
+    c2 = collector(ScoreMode.COMPLETE_NO_SCORES, ScoreAndDoc.class);
+    MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new ScoreAndDoc());
 
     // several collectors need scores => caching
     c1 = collector(ScoreMode.COMPLETE, ScoreCachingWrappingScorer.class);
     c2 = collector(ScoreMode.COMPLETE, ScoreCachingWrappingScorer.class);
-    MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new FakeScorer());
+    MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new ScoreAndDoc());
 
     reader.close();
     dir.close();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java
index 3118fa8..ea2fd4c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java
@@ -22,7 +22,10 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumMap;
 import java.util.Map;
+import java.util.Set;
 
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -31,12 +34,39 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 public class TestBoolean2ScorerSupplier extends LuceneTestCase {
 
+  private static class FakeWeight extends Weight {
+
+    FakeWeight() {
+      super(new MatchNoDocsQuery());
+    }
+
+    @Override
+    public void extractTerms(Set<Term> terms) {
+
+    }
+
+    @Override
+    public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Scorer scorer(LeafReaderContext context) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean isCacheable(LeafReaderContext ctx) {
+      return false;
+    }
+  }
+
   private static class FakeScorer extends Scorer {
 
     private final DocIdSetIterator it;
 
     FakeScorer(long cost) {
-      super(null);
+      super(new FakeWeight());
       this.it = DocIdSetIterator.all(Math.toIntExact(cost));
     }
 
@@ -124,17 +154,17 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     }
 
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42));
-    ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
+    ScorerSupplier s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
     assertEquals(42, s.cost());
     assertEquals(42, s.get(random().nextInt(100)).iterator().cost());
 
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12));
-    s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
+    s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
     assertEquals(42 + 12, s.cost());
     assertEquals(42 + 12, s.get(random().nextInt(100)).iterator().cost());
 
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20));
-    s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
+    s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
     assertEquals(42 + 12 + 20, s.cost());
     assertEquals(42 + 12 + 20, s.get(random().nextInt(100)).iterator().cost());
   }
@@ -147,26 +177,26 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
 
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12));
-    ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
+    ScorerSupplier s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
     assertEquals(42 + 12, s.cost());
     assertEquals(42 + 12, s.get(random().nextInt(100)).iterator().cost());
 
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20));
-    s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
+    s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
     assertEquals(42 + 12 + 20, s.cost());
     assertEquals(42 + 12 + 20, s.get(random().nextInt(100)).iterator().cost());
-    s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2);
+    s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2);
     assertEquals(12 + 20, s.cost());
     assertEquals(12 + 20, s.get(random().nextInt(100)).iterator().cost());
 
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30));
-    s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
+    s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
     assertEquals(42 + 12 + 20 + 30, s.cost());
     assertEquals(42 + 12 + 20 + 30, s.get(random().nextInt(100)).iterator().cost());
-    s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2);
+    s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2);
     assertEquals(12 + 20 + 30, s.cost());
     assertEquals(12 + 20 + 30, s.get(random().nextInt(100)).iterator().cost());
-    s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 3);
+    s = new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 3);
     assertEquals(12 + 20, s.cost());
     assertEquals(12 + 20, s.get(random().nextInt(100)).iterator().cost());
   }
@@ -201,7 +231,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
         continue;
       }
       int minShouldMatch = numShoulds == 0 ? 0 : TestUtil.nextInt(random(), 0, numShoulds - 1);
-      Boolean2ScorerSupplier supplier = new Boolean2ScorerSupplier(null,
+      Boolean2ScorerSupplier supplier = new Boolean2ScorerSupplier(new FakeWeight(),
           subs, scoreMode, minShouldMatch);
       long cost1 = supplier.cost();
       long cost2 = supplier.get(Long.MAX_VALUE).iterator().cost();
@@ -226,7 +256,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     // If the clauses are less costly than the lead cost, the min cost is the new lead cost
     subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42, 12));
     subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12, 12));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(Long.MAX_VALUE); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(Long.MAX_VALUE); // triggers assertions as a side-effect
 
     subs = new EnumMap<>(Occur.class);
     for (Occur occur : Occur.values()) {
@@ -236,7 +266,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     // If the lead cost is less that the clauses' cost, then we don't modify it
     subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42, 7));
     subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12, 7));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(7); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(7); // triggers assertions as a side-effect
   }
 
   public void testDisjunctionLeadCost() throws IOException {
@@ -246,12 +276,12 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     }
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, 54));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 54));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
 
     subs.get(Occur.SHOULD).clear();
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, 20));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 20));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(20); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(20); // triggers assertions as a side-effect
   }
 
   public void testDisjunctionWithMinShouldMatchLeadCost() throws IOException {
@@ -265,7 +295,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(50, 42));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 42));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 42));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(100); // triggers assertions as a side-effect
 
     subs = new EnumMap<>(Occur.class);
     for (Occur occur : Occur.values()) {
@@ -276,7 +306,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, 20));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 20));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 20));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(20); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(20); // triggers assertions as a side-effect
 
     subs = new EnumMap<>(Occur.class);
     for (Occur occur : Occur.values()) {
@@ -287,7 +317,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 62));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 62));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20, 62));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(100); // triggers assertions as a side-effect
 
     subs = new EnumMap<>(Occur.class);
     for (Occur occur : Occur.values()) {
@@ -298,7 +328,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 32));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 32));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20, 32));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 3).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 3).get(100); // triggers assertions as a side-effect
   }
 
   public void testProhibitedLeadCost() throws IOException {
@@ -310,19 +340,19 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     // The MUST_NOT clause is called with the same lead cost as the MUST clause
     subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
     subs.get(Occur.MUST_NOT).add(new FakeScorerSupplier(30, 42));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
 
     subs.get(Occur.MUST).clear();
     subs.get(Occur.MUST_NOT).clear();
     subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
     subs.get(Occur.MUST_NOT).add(new FakeScorerSupplier(80, 42));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
 
     subs.get(Occur.MUST).clear();
     subs.get(Occur.MUST_NOT).clear();
     subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 20));
     subs.get(Occur.MUST_NOT).add(new FakeScorerSupplier(30, 20));
-    new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(20); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(20); // triggers assertions as a side-effect
   }
 
   public void testMixedLeadCost() throws IOException {
@@ -334,19 +364,19 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
     // The SHOULD clause is always called with the same lead cost as the MUST clause
     subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 42));
-    new Boolean2ScorerSupplier(null, subs, ScoreMode.COMPLETE, 0).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, ScoreMode.COMPLETE, 0).get(100); // triggers assertions as a side-effect
 
     subs.get(Occur.MUST).clear();
     subs.get(Occur.SHOULD).clear();
     subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(80, 42));
-    new Boolean2ScorerSupplier(null, subs, ScoreMode.COMPLETE, 0).get(100); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, ScoreMode.COMPLETE, 0).get(100); // triggers assertions as a side-effect
 
     subs.get(Occur.MUST).clear();
     subs.get(Occur.SHOULD).clear();
     subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 20));
     subs.get(Occur.SHOULD).add(new FakeScorerSupplier(80, 20));
-    new Boolean2ScorerSupplier(null, subs, ScoreMode.COMPLETE, 0).get(20); // triggers assertions as a side-effect
+    new Boolean2ScorerSupplier(new FakeWeight(), subs, ScoreMode.COMPLETE, 0).get(20); // triggers assertions as a side-effect
   }
 
 }
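
The cost assertions at the top of this diff encode a simple rule (my reading of the test, not text from the patch): with n SHOULD clauses and minShouldMatch = m, the m - 1 most expensive clauses can be skipped, so the cost estimate is the sum of the n - m + 1 cheapest clause costs. A small self-contained sketch in Java:

    long[] costs = {42, 12, 20, 30};   // clause costs from the assertions above
    int m = 3;                         // minShouldMatch
    long[] sorted = costs.clone();
    java.util.Arrays.sort(sorted);     // cheapest first
    long cost = 0;
    for (int i = 0; i < costs.length - (m - 1); i++) {
      cost += sorted[i];               // 12 + 20 = 32, matching assertEquals(12 + 20, s.cost())
    }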

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
index 7b12500..16477a7 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanOr.java
@@ -221,7 +221,7 @@ public class TestBooleanOr extends LuceneTestCase {
 
   private static BulkScorer scorer(int... matches) {
     return new BulkScorer() {
-      final FakeScorer scorer = new FakeScorer();
+      final ScoreAndDoc scorer = new ScoreAndDoc();
       int i = 0;
       @Override
       public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index 86733a4..75fdd01 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -105,7 +105,7 @@ public class TestBooleanScorer extends LuceneTestCase {
             @Override
             public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
               assert min == 0;
-              collector.setScorer(new FakeScorer());
+              collector.setScorer(new ScoreAndDoc());
               collector.collect(0);
               return DocIdSetIterator.NO_MORE_DOCS;
             }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
index 12136b5..877c496 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
@@ -25,25 +25,14 @@ public class TestCachingCollector extends LuceneTestCase {
 
   private static final double ONE_BYTE = 1.0 / (1024 * 1024); // 1 byte out of MB
   
-  private static class MockScorer extends Scorer {
+  private static class MockScorable extends Scorable {
     
-    private MockScorer() {
-      super((Weight) null);
-    }
-    
-    @Override
-    public float score() throws IOException { return 0; }
-
     @Override
-    public float getMaxScore(int upTo) throws IOException { return 0; }
+    public float score() { return 0; }
 
     @Override
     public int docID() { return 0; }
 
-    @Override
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException();
-    }
   }
   
   private static class NoOpCollector extends SimpleCollector {
@@ -62,7 +51,7 @@ public class TestCachingCollector extends LuceneTestCase {
     for (boolean cacheScores : new boolean[] { false, true }) {
       CachingCollector cc = CachingCollector.create(new NoOpCollector(), cacheScores, 1.0);
       LeafCollector acc = cc.getLeafCollector(null);
-      acc.setScorer(new MockScorer());
+      acc.setScorer(new MockScorable());
 
       // collect 1000 docs
       for (int i = 0; i < 1000; i++) {
@@ -90,7 +79,7 @@ public class TestCachingCollector extends LuceneTestCase {
   public void testIllegalStateOnReplay() throws Exception {
     CachingCollector cc = CachingCollector.create(new NoOpCollector(), true, 50 * ONE_BYTE);
     LeafCollector acc = cc.getLeafCollector(null);
-    acc.setScorer(new MockScorer());
+    acc.setScorer(new MockScorable());
     
     // collect 130 docs, this should be enough for triggering cache abort.
     for (int i = 0; i < 130; i++) {
@@ -115,7 +104,7 @@ public class TestCachingCollector extends LuceneTestCase {
       CachingCollector cc = CachingCollector.create(new NoOpCollector(),
           cacheScores, bytesPerDoc * ONE_BYTE * numDocs);
       LeafCollector acc = cc.getLeafCollector(null);
-      acc.setScorer(new MockScorer());
+      acc.setScorer(new MockScorable());
       for (int i = 0; i < numDocs; i++) acc.collect(i);
       assertTrue(cc.isCached());
 
@@ -130,7 +119,7 @@ public class TestCachingCollector extends LuceneTestCase {
       // create w/ null wrapped collector, and test that the methods work
       CachingCollector cc = CachingCollector.create(cacheScores, 50 * ONE_BYTE);
       LeafCollector acc = cc.getLeafCollector(null);
-      acc.setScorer(new MockScorer());
+      acc.setScorer(new MockScorable());
       acc.collect(0);
       
       assertTrue(cc.isCached());
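
The MockScorer-to-MockScorable change above is the heart of this refactoring: LeafCollector.setScorer now takes a Scorable, which only requires score() and docID(), so test doubles no longer have to stub out iterator() or getMaxScore(). A minimal sketch of such a stub (ConstantScorable is an illustrative name, not part of the patch):

    import org.apache.lucene.search.Scorable;

    class ConstantScorable extends Scorable {
      private final float score;
      ConstantScorable(float score) { this.score = score; }
      @Override public float score() { return score; } // same score for every doc
      @Override public int docID() { return 0; }       // fixed doc id, fine for unit tests
    }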

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestConjunctionDISI.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConjunctionDISI.java b/lucene/core/src/test/org/apache/lucene/search/TestConjunctionDISI.java
index 083ac24..c10b78e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConjunctionDISI.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConjunctionDISI.java
@@ -22,7 +22,10 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
 
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
@@ -82,6 +85,33 @@ public class TestConjunctionDISI extends LuceneTestCase {
     return scorer(TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator), twoPhaseIterator);
   }
 
+  private static class FakeWeight extends Weight {
+
+    protected FakeWeight() {
+      super(new MatchNoDocsQuery());
+    }
+
+    @Override
+    public void extractTerms(Set<Term> terms) {
+
+    }
+
+    @Override
+    public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Scorer scorer(LeafReaderContext context) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean isCacheable(LeafReaderContext ctx) {
+      return false;
+    }
+  }
+
   /**
    * Create a {@link Scorer} that wraps the given {@link DocIdSetIterator}. It
    * also accepts a {@link TwoPhaseIterator} view, which is exposed in
@@ -91,7 +121,7 @@ public class TestConjunctionDISI extends LuceneTestCase {
    * advantage of the {@link TwoPhaseIterator} view.
    */
   private static Scorer scorer(DocIdSetIterator it, TwoPhaseIterator twoPhaseIterator) {
-    return new Scorer(null) {
+    return new Scorer(new FakeWeight()) {
 
       @Override
       public DocIdSetIterator iterator() {
@@ -204,12 +234,12 @@ public class TestConjunctionDISI extends LuceneTestCase {
           case 0:
             // simple iterator
             sets[i] = set;
-            iterators[i] = new ConstantScoreScorer(null, 0f, anonymizeIterator(new BitDocIdSet(set).iterator()));
+            iterators[i] = new ConstantScoreScorer(new FakeWeight(), 0f, anonymizeIterator(new BitDocIdSet(set).iterator()));
             break;
           case 1:
             // bitSet iterator
             sets[i] = set;
-            iterators[i] = new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator());
+            iterators[i] = new ConstantScoreScorer(new FakeWeight(), 0f, new BitDocIdSet(set).iterator());
             break;
           default:
             // scorer with approximation
@@ -240,7 +270,7 @@ public class TestConjunctionDISI extends LuceneTestCase {
         if (random().nextBoolean()) {
           // simple iterator
           sets[i] = set;
-          iterators[i] = new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator());
+          iterators[i] = new ConstantScoreScorer(new FakeWeight(), 0f, new BitDocIdSet(set).iterator());
         } else {
           // scorer with approximation
           final FixedBitSet confirmed = clearRandomBits(set);
@@ -276,12 +306,12 @@ public class TestConjunctionDISI extends LuceneTestCase {
           case 0:
             // simple iterator
             sets[i] = set;
-            newIterator = new ConstantScoreScorer(null, 0f, anonymizeIterator(new BitDocIdSet(set).iterator()));
+            newIterator = new ConstantScoreScorer(new FakeWeight(), 0f, anonymizeIterator(new BitDocIdSet(set).iterator()));
             break;
           case 1:
             // bitSet iterator
             sets[i] = set;
-            newIterator = new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator());
+            newIterator = new ConstantScoreScorer(new FakeWeight(), 0f, new BitDocIdSet(set).iterator());
             break;
           default:
             // scorer with approximation
@@ -322,7 +352,7 @@ public class TestConjunctionDISI extends LuceneTestCase {
         if (random().nextBoolean()) {
           // simple iterator
           sets[i] = set;
-          scorers.add(new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator()));
+          scorers.add(new ConstantScoreScorer(new FakeWeight(), 0f, new BitDocIdSet(set).iterator()));
         } else {
           // scorer with approximation
           final FixedBitSet confirmed = clearRandomBits(set);
@@ -340,9 +370,9 @@ public class TestConjunctionDISI extends LuceneTestCase {
         List<Scorer> subIterators = scorers.subList(subSeqStart, subSeqEnd);
         Scorer subConjunction;
         if (wrapWithScorer) {
-          subConjunction = new ConjunctionScorer(null, subIterators, Collections.emptyList());
+          subConjunction = new ConjunctionScorer(new FakeWeight(), subIterators, Collections.emptyList());
         } else {
-          subConjunction = new ConstantScoreScorer(null, 0f, ConjunctionDISI.intersectScorers(subIterators));
+          subConjunction = new ConstantScoreScorer(new FakeWeight(), 0f, ConjunctionDISI.intersectScorers(subIterators));
         }
         scorers.set(subSeqStart, subConjunction);
         int toRemove = subSeqEnd - subSeqStart - 1;
@@ -352,7 +382,7 @@ public class TestConjunctionDISI extends LuceneTestCase {
       }
       if (scorers.size() == 1) {
         // ConjunctionDISI needs two iterators
-        scorers.add(new ConstantScoreScorer(null, 0f, DocIdSetIterator.all(maxDoc)));
+        scorers.add(new ConstantScoreScorer(new FakeWeight(), 0f, DocIdSetIterator.all(maxDoc)));
       }
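
A hedged usage sketch of the pattern in this test: every Scorer now carries a real, if inert, Weight, and constant-score scorers intersect exactly as before. FakeWeight is the stub added earlier in this diff, the other types come from the test's existing imports, and the bit sets and doc ids below are invented for illustration:

    FixedBitSet a = new FixedBitSet(100); a.set(3); a.set(7);
    FixedBitSet b = new FixedBitSet(100); b.set(7); b.set(9);
    Scorer s1 = new ConstantScoreScorer(new FakeWeight(), 0f, new BitDocIdSet(a).iterator());
    Scorer s2 = new ConstantScoreScorer(new FakeWeight(), 0f, new BitDocIdSet(b).iterator());
    DocIdSetIterator conj = ConjunctionDISI.intersectScorers(Arrays.asList(s1, s2));
    // conj now visits only doc 7, the intersection of the two sets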
 
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
index 2035f9e..3e15070 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -54,7 +54,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
     QueryUtils.checkUnequal(q1, new TermQuery(new Term("a", "b")));
   }
   
-  private void checkHits(IndexSearcher searcher, Query q, final float expectedScore, final Class<? extends Scorer> innerScorerClass) throws IOException {
+  private void checkHits(IndexSearcher searcher, Query q, final float expectedScore, final Class<? extends Scorable> innerScorerClass) throws IOException {
     final int[] count = new int[1];
     searcher.search(q, new SimpleCollector() {
       private Scorable scorer;
@@ -131,7 +131,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
       checkHits(searcher, csq2, csq2.getBoost(), TermScorer.class);
       
       // for the combined BQ, the scorer should always be BooleanScorer's BucketScorer, because our scorer supports out-of order collection!
-      final Class<FakeScorer> bucketScorerClass = FakeScorer.class;
+      final Class<ScoreAndDoc> bucketScorerClass = ScoreAndDoc.class;
       checkHits(searcher, csqbq, csqbq.getBoost(), bucketScorerClass);
     } finally {
       IOUtils.close(reader, directory);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestMaxScoreSumPropagator.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMaxScoreSumPropagator.java b/lucene/core/src/test/org/apache/lucene/search/TestMaxScoreSumPropagator.java
index d072233..96a34a5 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMaxScoreSumPropagator.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMaxScoreSumPropagator.java
@@ -22,7 +22,10 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -30,13 +33,40 @@ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
 
 public class TestMaxScoreSumPropagator extends LuceneTestCase {
 
+  private static class FakeWeight extends Weight {
+
+    FakeWeight() {
+      super(new MatchNoDocsQuery());
+    }
+
+    @Override
+    public void extractTerms(Set<Term> terms) {
+
+    }
+
+    @Override
+    public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Scorer scorer(LeafReaderContext context) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean isCacheable(LeafReaderContext ctx) {
+      return false;
+    }
+  }
+
   private static class FakeScorer extends Scorer {
 
     final float maxScore;
     float minCompetitiveScore;
 
-    FakeScorer(float maxScore) {
-      super(null);
+    FakeScorer(float maxScore) throws IOException {
+      super(new FakeWeight());
       this.maxScore = maxScore;
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestMultiCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiCollector.java
index f189821..dda314b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiCollector.java
@@ -134,7 +134,7 @@ public class TestMultiCollector extends LuceneTestCase {
     collector1 = new TerminateAfterCollector(collector1, 1);
     collector2 = new TerminateAfterCollector(collector2, 2);
 
-    Scorer scorer = new FakeScorer();
+    Scorable scorer = new ScoreAndDoc();
 
     List<Collector> collectors = Arrays.asList(collector1, collector2);
     Collections.shuffle(collectors, random());
@@ -172,28 +172,18 @@ public class TestMultiCollector extends LuceneTestCase {
     IndexReader reader = DirectoryReader.open(w);
     w.close();
 
-    Scorer scorer = new Scorer(null) {
+    Scorable scorer = new Scorable() {
       @Override
       public int docID() {
         throw new UnsupportedOperationException();
       }
 
       @Override
-      public float score() throws IOException {
+      public float score() {
         return 0;
       }
 
       @Override
-      public DocIdSetIterator iterator() {
-        throw new UnsupportedOperationException();
-      }
-
-      @Override
-      public float getMaxScore(int upTo) throws IOException {
-        throw new UnsupportedOperationException();
-      }
-
-      @Override
       public void setMinCompetitiveScore(float minScore) {
         throw new AssertionError();
       }
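
The anonymous Scorable above turns setMinCompetitiveScore into an AssertionError to verify that MultiCollector never forwards the call when a wrapped collector cannot accept it. My reading of the Scorable contract (not patch text) is that the override is optional; a scorer that does not care simply leaves the default in place:

    Scorable scorer = new Scorable() {
      @Override public int docID() { return 0; }
      @Override public float score() { return 0; }
      // setMinCompetitiveScore is deliberately not overridden here:
      // the base class default is assumed to be a no-op, so collectors
      // may call it without the scorer opting in.
    };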

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
index 8bbfeb5..2e95653 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
@@ -429,7 +429,7 @@ public class TestQueryRescorer extends LuceneTestCase {
         @Override
         public Scorer scorer(final LeafReaderContext context) throws IOException {
 
-          return new Scorer(null) {
+          return new Scorer(this) {
             int docID = -1;
 
             @Override
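
The small change above is the idiom worth noting: inside Weight.scorer(), the anonymous Weight now passes itself as the Scorer's parent instead of null. A sketch of the full shape under the post-change API (the constant score and match-all iterator are placeholders, not the test's real logic):

    @Override
    public Scorer scorer(final LeafReaderContext context) throws IOException {
      return new Scorer(this) {  // `this` is the enclosing Weight
        final DocIdSetIterator disi = DocIdSetIterator.all(context.reader().maxDoc());
        @Override public int docID() { return disi.docID(); }
        @Override public float score() { return 1f; }                // placeholder score
        @Override public float getMaxScore(int upTo) { return 1f; }  // bounds the placeholder
        @Override public DocIdSetIterator iterator() { return disi; }
      };
    }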

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
index caba25f..0506310 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsCollector.java
@@ -192,15 +192,11 @@ public class TestTopDocsCollector extends LuceneTestCase {
     }
   }
 
-  private static class FakeScorer extends Scorer {
+  private static class ScoreAndDoc extends Scorable {
     int doc = -1;
     float score;
     Float minCompetitiveScore = null;
 
-    FakeScorer() {
-      super(null);
-    }
-
     @Override
     public void setMinCompetitiveScore(float minCompetitiveScore) {
       this.minCompetitiveScore = minCompetitiveScore;
@@ -215,16 +211,6 @@ public class TestTopDocsCollector extends LuceneTestCase {
     public float score() throws IOException {
       return score;
     }
-
-    @Override
-    public float getMaxScore(int upTo) throws IOException {
-      return Float.POSITIVE_INFINITY;
-    }
-
-    @Override
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException();
-    }
   }
 
   public void testSetMinCompetitiveScore() throws Exception {
@@ -240,7 +226,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
     w.close();
 
     TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 1);
-    FakeScorer scorer = new FakeScorer();
+    ScoreAndDoc scorer = new ScoreAndDoc();
 
     LeafCollector leafCollector = collector.getLeafCollector(reader.leaves().get(0));
     leafCollector.setScorer(scorer);
@@ -269,7 +255,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
     assertEquals(Math.nextUp(2f), scorer.minCompetitiveScore, 0f);
 
     // Make sure the min score is set on scorers on new segments
-    scorer = new FakeScorer();
+    scorer = new ScoreAndDoc();
     leafCollector = collector.getLeafCollector(reader.leaves().get(1));
     leafCollector.setScorer(scorer);
     assertEquals(Math.nextUp(2f), scorer.minCompetitiveScore, 0f);
@@ -302,7 +288,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
 
     for (int totalHitsThreshold = 1; totalHitsThreshold < 20; ++ totalHitsThreshold) {
       TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, totalHitsThreshold);
-      FakeScorer scorer = new FakeScorer();
+      ScoreAndDoc scorer = new ScoreAndDoc();
 
       LeafCollector leafCollector = collector.getLeafCollector(reader.leaves().get(0));
       leafCollector.setScorer(scorer);
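
For context, this test drives the collector by hand: ScoreAndDoc is positioned on a hit, collect() is called, and once the top-2 queue fills the collector pushes its threshold back through setMinCompetitiveScore. A condensed sketch of that handshake (the doc ids and scores are invented; `reader` is the test's index reader):

    TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 1);
    ScoreAndDoc scorer = new ScoreAndDoc();
    LeafCollector leaf = collector.getLeafCollector(reader.leaves().get(0));
    leaf.setScorer(scorer);
    scorer.doc = 0; scorer.score = 3f; leaf.collect(0);
    scorer.doc = 1; scorer.score = 2f; leaf.collect(1);
    // the queue is now full, so the collector calls
    // scorer.setMinCompetitiveScore(Math.nextUp(2f))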

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java
index 36a1838..3a86449 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTopFieldCollector.java
@@ -149,7 +149,7 @@ public class TestTopFieldCollector extends LuceneTestCase {
     for (int totalHitsThreshold = 1; totalHitsThreshold < 20; ++ totalHitsThreshold) {
       for (FieldDoc after : new FieldDoc[] { null, new FieldDoc(4, Float.NaN, new Object[] { 2L })}) {
         TopFieldCollector collector = TopFieldCollector.create(sort, 2, after, totalHitsThreshold);
-        FakeScorer scorer = new FakeScorer();
+        ScoreAndDoc scorer = new ScoreAndDoc();
 
         LeafCollector leafCollector1 = collector.getLeafCollector(reader.leaves().get(0));
         leafCollector1.setScorer(scorer);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/expressions/src/java/org/apache/lucene/expressions/FakeScorer.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/FakeScorer.java b/lucene/expressions/src/java/org/apache/lucene/expressions/FakeScorer.java
deleted file mode 100644
index 026c789..0000000
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/FakeScorer.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.expressions;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Scorer;
-
-class FakeScorer extends Scorer {
-
-  float score;
-  int doc = -1;
-
-  FakeScorer() {
-    super(null);
-  }
-
-  @Override
-  public int docID() {
-    return doc;
-  }
-
-  @Override
-  public DocIdSetIterator iterator() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public float score() throws IOException {
-    return score;
-  }
-
-  @Override
-  public float getMaxScore(int upTo) throws IOException {
-    return Float.POSITIVE_INFINITY;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
index e1e5b55..ddace0d 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
@@ -26,9 +26,9 @@ import org.apache.lucene.search.BulkScorer;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TwoPhaseIterator;
-import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 
@@ -82,7 +82,7 @@ class DrillSidewaysScorer extends BulkScorer {
     //  System.out.println("\nscore: reader=" + context.reader());
     //}
     //System.out.println("score r=" + context.reader());
-    FakeScorer scorer = new FakeScorer();
+    ScoreAndDoc scorer = new ScoreAndDoc();
     collector.setScorer(scorer);
     if (drillDownCollector != null) {
       drillDownLeafCollector = drillDownCollector.getLeafCollector(context);
@@ -580,21 +580,12 @@ class DrillSidewaysScorer extends BulkScorer {
     sidewaysCollector.collect(collectDocID);
   }
 
-  private final class FakeScorer extends Scorer {
-
-    public FakeScorer() {
-      super(null);
-    }
+  private final class ScoreAndDoc extends Scorable {
 
     @Override
     public int docID() {
       return collectDocID;
     }
-
-    @Override
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
-    }
     
     @Override
     public float score() {
@@ -602,19 +593,10 @@ class DrillSidewaysScorer extends BulkScorer {
     }
 
     @Override
-    public float getMaxScore(int upTo) throws IOException {
-      return Float.POSITIVE_INFINITY;
-    }
-
-    @Override
     public Collection<ChildScorable> getChildren() {
       return Collections.singletonList(new ChildScorable(baseScorer, "MUST"));
     }
 
-    @Override
-    public Weight getWeight() {
-      throw new UnsupportedOperationException();
-    }
   }
 
   static class DocsAndCost {
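
ScoreAndDoc keeps the getChildren() override, so collectors can still discover the wrapped baseScorer through the Scorable interface. A purely illustrative consumer:

    for (Scorable.ChildScorable child : scorer.getChildren()) {
      // for DrillSidewaysScorer's holder this reports baseScorer as "MUST"
      System.out.println(child.relationship + " -> " + child.child.docID());
    }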

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index d5b3555..23601ca 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -286,7 +286,7 @@ public class BlockGroupingCollector extends SimpleCollector {
     }
     int totalGroupedHitCount = 0;
 
-    final FakeScorer fakeScorer = new FakeScorer();
+    final ScoreAndDoc fakeScorer = new ScoreAndDoc();
 
     float maxScore = Float.MIN_VALUE;
 
@@ -494,4 +494,21 @@ public class BlockGroupingCollector extends SimpleCollector {
   public ScoreMode scoreMode() {
     return needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
   }
+
+  private static class ScoreAndDoc extends Scorable {
+
+    float score;
+    int doc = -1;
+
+    @Override
+    public int docID() {
+      return doc;
+    }
+
+    @Override
+    public float score() {
+      return score;
+    }
+
+  }
 }
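
The private class above follows the same holder pattern as the other ScoreAndDoc conversions: the collector mutates the holder's fields while replaying cached hits, and downstream collectors read them back through Scorable. A sketch of that flow (leafCollector, docId and hitScore are stand-ins, not names from the patch):

    ScoreAndDoc fakeScorer = new ScoreAndDoc();
    leafCollector.setScorer(fakeScorer);
    fakeScorer.doc = docId;        // position the holder on the cached hit
    fakeScorer.score = hitScore;   // score to replay for this hit
    leafCollector.collect(docId);  // the collector pulls score() from the holder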

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/grouping/src/java/org/apache/lucene/search/grouping/FakeScorer.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FakeScorer.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FakeScorer.java
deleted file mode 100644
index b46662d..0000000
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FakeScorer.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.grouping;
-
-import java.io.IOException;
-
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Scorer;
-
-class FakeScorer extends Scorer {
-
-  float score;
-  int doc = -1;
-
-  FakeScorer() {
-    super(null);
-  }
-
-  @Override
-  public int docID() {
-    return doc;
-  }
-
-  @Override
-  public DocIdSetIterator iterator() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public float score() throws IOException {
-    return score;
-  }
-
-  @Override
-  public float getMaxScore(int upTo) throws IOException {
-    return Float.POSITIVE_INFINITY;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java b/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
deleted file mode 100644
index 125ce88..0000000
--- a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.join;
-
-import java.io.IOException;
-
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Scorer;
-
-class FakeScorer extends Scorer {
-
-  float score;
-  int doc = -1;
-
-  FakeScorer() {
-    super(null);
-  }
-
-  @Override
-  public int docID() {
-    return doc;
-  }
-
-  @Override
-  public DocIdSetIterator iterator() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public float score() throws IOException {
-    return score;
-  }
-
-  @Override
-  public float getMaxScore(int upTo) throws IOException {
-    return Float.POSITIVE_INFINITY;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionRangeQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionRangeQuery.java
index 2d55bae..315f650 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionRangeQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionRangeQuery.java
@@ -34,7 +34,7 @@ import org.apache.lucene.search.Weight;
  * range.  The score is the float value.  This can be a slow query if run by itself since it must visit all docs;
  * ideally it's combined with other queries.
  * It's mostly a wrapper around
- * {@link FunctionValues#getRangeScorer(LeafReaderContext, String, String, boolean, boolean)}.
+ * {@link FunctionValues#getRangeScorer(Weight, LeafReaderContext, String, String, boolean, boolean)}.
  *
  * A similar class is {@code org.apache.lucene.search.DocValuesRangeQuery} in the sandbox module.  That one is
  * constant scoring.
@@ -152,7 +152,7 @@ public class FunctionRangeQuery extends Query {
     public ValueSourceScorer scorer(LeafReaderContext context) throws IOException {
       FunctionValues functionValues = valueSource.getValues(vsContext, context);
       // getRangeScorer takes String args and parses them. Weird.
-      return functionValues.getRangeScorer(context, lowerVal, upperVal, includeLower, includeUpper);
+      return functionValues.getRangeScorer(this, context, lowerVal, upperVal, includeLower, includeUpper);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionValues.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionValues.java b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionValues.java
index 9e73b4b..76dda25 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionValues.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionValues.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueFloat;
@@ -144,8 +145,8 @@ public abstract class FunctionValues {
    * Yields a {@link Scorer} that matches all documents,
    * and that which produces scores equal to {@link #floatVal(int)}.
    */
-  public ValueSourceScorer getScorer(LeafReaderContext readerContext) {
-    return new ValueSourceScorer(readerContext, this) {
+  public ValueSourceScorer getScorer(Weight weight, LeafReaderContext readerContext) {
+    return new ValueSourceScorer(weight, readerContext, this) {
       @Override
       public boolean matches(int doc) {
         return true;
@@ -161,7 +162,7 @@ public abstract class FunctionValues {
   // because it needs different behavior depending on the type of fields.  There is also
   // a setup cost - parsing and normalizing params, and doing a binary search on the StringIndex.
   // TODO: change "reader" to LeafReaderContext
-  public ValueSourceScorer getRangeScorer(LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) throws IOException {
+  public ValueSourceScorer getRangeScorer(Weight weight, LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) throws IOException {
     float lower;
     float upper;
 
@@ -180,7 +181,7 @@ public abstract class FunctionValues {
     final float u = upper;
 
     if (includeLower && includeUpper) {
-      return new ValueSourceScorer(readerContext, this) {
+      return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;
@@ -190,7 +191,7 @@ public abstract class FunctionValues {
       };
     }
     else if (includeLower && !includeUpper) {
-       return new ValueSourceScorer(readerContext, this) {
+       return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;
@@ -200,7 +201,7 @@ public abstract class FunctionValues {
       };
     }
     else if (!includeLower && includeUpper) {
-       return new ValueSourceScorer(readerContext, this) {
+       return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;
@@ -210,7 +211,7 @@ public abstract class FunctionValues {
       };
     }
     else {
-       return new ValueSourceScorer(readerContext, this) {
+       return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;
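
With the new signature, callers thread the enclosing Weight through so that each branch's ValueSourceScorer gets a real parent. A hedged caller sketch (valueSource, vsContext, leafCtx and weight are assumed to be in scope, as in FunctionRangeQuery above):

    FunctionValues vals = valueSource.getValues(vsContext, leafCtx);
    ValueSourceScorer scorer =
        vals.getRangeScorer(weight, leafCtx, "10", "20", true, false); // 10 <= value < 20
    TwoPhaseIterator tpi = scorer.twoPhaseIterator(); // approximation is all docs; matches() filters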

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
index 5290dcc..209fb15 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
@@ -23,7 +23,6 @@ import java.util.Map;
 import java.util.Objects;
 
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.DoubleValues;
 import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.Explanation;
@@ -32,6 +31,7 @@ import org.apache.lucene.search.FieldComparatorSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LongValues;
 import org.apache.lucene.search.LongValuesSource;
+import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.SimpleFieldComparator;
 import org.apache.lucene.search.SortField;
@@ -87,34 +87,20 @@ public abstract class ValueSource {
     return context;
   }
 
-  private static class FakeScorer extends Scorer {
+  private static class ScoreAndDoc extends Scorable {
 
     int current = -1;
     float score = 0;
 
-    FakeScorer() {
-      super(null);
-    }
-
     @Override
     public int docID() {
       return current;
     }
 
     @Override
-    public float score() throws IOException {
+    public float score() {
       return score;
     }
-
-    @Override
-    public float getMaxScore(int upTo) throws IOException {
-      return Float.POSITIVE_INFINITY;
-    }
-
-    @Override
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException();
-    }
   }
 
   /**
@@ -135,7 +121,7 @@ public abstract class ValueSource {
     @Override
     public LongValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
       Map context = new IdentityHashMap<>();
-      FakeScorer scorer = new FakeScorer();
+      ScoreAndDoc scorer = new ScoreAndDoc();
       context.put("scorer", scorer);
       final FunctionValues fv = in.getValues(context, ctx);
       return new LongValues() {
@@ -211,7 +197,7 @@ public abstract class ValueSource {
     @Override
     public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
       Map context = new HashMap<>();
-      FakeScorer scorer = new FakeScorer();
+      ScoreAndDoc scorer = new ScoreAndDoc();
       context.put("scorer", scorer);
       context.put("searcher", searcher);
       FunctionValues fv = in.getValues(context, ctx);
@@ -248,7 +234,7 @@ public abstract class ValueSource {
     @Override
     public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
       Map context = new HashMap<>();
-      FakeScorer scorer = new FakeScorer();
+      ScoreAndDoc scorer = new ScoreAndDoc();
       scorer.score = scoreExplanation.getValue().floatValue();
       context.put("scorer", scorer);
       context.put("searcher", searcher);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
index 509f454..b2270be 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
@@ -22,13 +22,13 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TwoPhaseIterator;
+import org.apache.lucene.search.Weight;
 
 /**
  * {@link Scorer} which returns the result of {@link FunctionValues#floatVal(int)} as
  * the score for a document, and which filters out documents that don't match {@link #matches(int)}.
  * This Scorer has a {@link TwoPhaseIterator}.  This is similar to {@link FunctionQuery},
- * but this one has no {@link org.apache.lucene.search.Weight} normalization factors/multipliers
- * and that one doesn't filter either.
+ * with an added filter.
  * <p>
  * Note: If the scores are needed, then the underlying value will probably be
  * fetched/computed twice -- once to filter and next to return the score.  If that's non-trivial then
@@ -43,8 +43,8 @@ public abstract class ValueSourceScorer extends Scorer {
   private final TwoPhaseIterator twoPhaseIterator;
   private final DocIdSetIterator disi;
 
-  protected ValueSourceScorer(LeafReaderContext readerContext, FunctionValues values) {
-    super(null);//no weight
+  protected ValueSourceScorer(Weight weight, LeafReaderContext readerContext, FunctionValues values) {
+    super(weight);
     this.values = values;
     final DocIdSetIterator approximation = DocIdSetIterator.all(readerContext.reader().maxDoc()); // no approximation!
     this.twoPhaseIterator = new TwoPhaseIterator(approximation) {
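
Under the new constructor a concrete scorer only supplies matches(); the approximation stays "all docs" and the two-phase iterator built above does the filtering. A minimal subclass sketch with an invented predicate (weight, readerContext and values assumed in scope):

    ValueSourceScorer scorer = new ValueSourceScorer(weight, readerContext, values) {
      @Override
      public boolean matches(int doc) throws IOException {
        return values.exists(doc) && values.floatVal(doc) > 0f; // example predicate
      }
    };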

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java
index e855e27..1cc79a4 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DocTermsIndexDocValues.java
@@ -24,6 +24,7 @@ import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRefBuilder;
@@ -115,7 +116,7 @@ public abstract class DocTermsIndexDocValues extends FunctionValues {
   public abstract Object objectVal(int doc) throws IOException;  // force subclasses to override
 
   @Override
-  public ValueSourceScorer getRangeScorer(LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) throws IOException {
+  public ValueSourceScorer getRangeScorer(Weight weight, LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) throws IOException {
     // TODO: are lowerVal and upperVal in indexed form or not?
     lowerVal = lowerVal == null ? null : toTerm(lowerVal);
     upperVal = upperVal == null ? null : toTerm(upperVal);
@@ -143,7 +144,7 @@ public abstract class DocTermsIndexDocValues extends FunctionValues {
     final int ll = lower;
     final int uu = upper;
 
-    return new ValueSourceScorer(readerContext, this) {
+    return new ValueSourceScorer(weight, readerContext, this) {
       final SortedDocValues values = readerContext.reader().getSortedDocValues(field);
       private int lastDocID;
       

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DoubleDocValues.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DoubleDocValues.java b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DoubleDocValues.java
index 646db9c..321c05c 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DoubleDocValues.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/DoubleDocValues.java
@@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueDouble;
 
@@ -85,7 +86,7 @@ public abstract class DoubleDocValues extends FunctionValues {
   }
   
   @Override
-  public ValueSourceScorer getRangeScorer(LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
+  public ValueSourceScorer getRangeScorer(Weight weight, LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
     double lower,upper;
 
     if (lowerVal==null) {
@@ -105,7 +106,7 @@ public abstract class DoubleDocValues extends FunctionValues {
 
 
     if (includeLower && includeUpper) {
-      return new ValueSourceScorer(readerContext, this) {
+      return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;
@@ -115,7 +116,7 @@ public abstract class DoubleDocValues extends FunctionValues {
       };
     }
     else if (includeLower && !includeUpper) {
-      return new ValueSourceScorer(readerContext, this) {
+      return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;
@@ -125,7 +126,7 @@ public abstract class DoubleDocValues extends FunctionValues {
       };
     }
     else if (!includeLower && includeUpper) {
-      return new ValueSourceScorer(readerContext, this) {
+      return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;
@@ -135,7 +136,7 @@ public abstract class DoubleDocValues extends FunctionValues {
       };
     }
     else {
-      return new ValueSourceScorer(readerContext, this) {
+      return new ValueSourceScorer(weight, readerContext, this) {
         @Override
         public boolean matches(int doc) throws IOException {
           if (!exists(doc)) return false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/IntDocValues.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/IntDocValues.java b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/IntDocValues.java
index 2c40fb7..1b98666 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/IntDocValues.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/IntDocValues.java
@@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
 
@@ -80,7 +81,7 @@ public abstract class IntDocValues extends FunctionValues {
   }
   
   @Override
-  public ValueSourceScorer getRangeScorer(LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
+  public ValueSourceScorer getRangeScorer(Weight weight, LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
     int lower,upper;
 
     // instead of using separate comparison functions, adjust the endpoints.
@@ -102,7 +103,7 @@ public abstract class IntDocValues extends FunctionValues {
     final int ll = lower;
     final int uu = upper;
 
-    return new ValueSourceScorer(readerContext, this) {
+    return new ValueSourceScorer(weight, readerContext, this) {
       @Override
       public boolean matches(int doc) throws IOException {
         if (!exists(doc)) return false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/LongDocValues.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/LongDocValues.java b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/LongDocValues.java
index d36afa0..3637bb2 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/LongDocValues.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/docvalues/LongDocValues.java
@@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueLong;
 
@@ -89,7 +90,7 @@ public abstract class LongDocValues extends FunctionValues {
   }
   
   @Override
-  public ValueSourceScorer getRangeScorer(LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
+  public ValueSourceScorer getRangeScorer(Weight weight,  LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
     long lower,upper;
 
     // instead of using separate comparison functions, adjust the endpoints.
@@ -111,7 +112,7 @@ public abstract class LongDocValues extends FunctionValues {
     final long ll = lower;
     final long uu = upper;
 
-    return new ValueSourceScorer(readerContext, this) {
+    return new ValueSourceScorer(weight, readerContext, this) {
       @Override
       public boolean matches(int doc) throws IOException {
         if (!exists(doc)) return false;
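
For third-party FunctionValues implementations, the migration is mechanical:
accept the new Weight parameter and thread it through to the ValueSourceScorer
constructor. A minimal sketch under that assumption, inside a hypothetical
FunctionValues subclass (the double-based bounds handling below is illustrative,
not part of the commit):

  // Hypothetical FunctionValues subclass showing the new getRangeScorer shape.
  // The Weight is not used for matching; it is only threaded through so the
  // resulting Scorer no longer carries a null Weight.
  @Override
  public ValueSourceScorer getRangeScorer(Weight weight, LeafReaderContext readerContext,
                                          String lowerVal, String upperVal,
                                          boolean includeLower, boolean includeUpper) {
    final double lower = lowerVal == null ? Double.NEGATIVE_INFINITY : Double.parseDouble(lowerVal);
    final double upper = upperVal == null ? Double.POSITIVE_INFINITY : Double.parseDouble(upperVal);
    return new ValueSourceScorer(weight, readerContext, this) {
      @Override
      public boolean matches(int doc) throws IOException {
        if (!exists(doc)) return false;
        double val = doubleVal(doc);
        return (includeLower ? val >= lower : val > lower)
            && (includeUpper ? val <= upper : val < upper);
      }
    };
  }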


[04/43] lucene-solr:jira/http2: [LUCENE-8343] minor documentation fixes

Posted by da...@apache.org.
[LUCENE-8343] minor documentation fixes


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2b636e8c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2b636e8c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2b636e8c

Branch: refs/heads/jira/http2
Commit: 2b636e8c3adb879f0cd2cff45824e226d747b5f0
Parents: cef9a22
Author: Alessandro Benedetti <a....@sease.io>
Authored: Thu Jun 7 16:51:38 2018 +0100
Committer: Alessandro Benedetti <a....@sease.io>
Committed: Thu Jun 7 16:51:38 2018 +0100

----------------------------------------------------------------------
 solr/solr-ref-guide/src/suggester.adoc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b636e8c/solr/solr-ref-guide/src/suggester.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/suggester.adoc b/solr/solr-ref-guide/src/suggester.adoc
index 7156dea..caf36a9 100644
--- a/solr/solr-ref-guide/src/suggester.adoc
+++ b/solr/solr-ref-guide/src/suggester.adoc
@@ -198,11 +198,11 @@ Used to calculate weight coefficient using the position of the first matching wo
 `position_linear`:::
 `weightFieldValue * (1 - 0.10*position)`: Matches to the start will be given a higher score. This is the default.
 `position_reciprocal`:::
-`weightFieldValue / (1 + position)`: Matches to the start will be given a score which decay faster than linear.
+`weightFieldValue / (1 + position)`: Matches to the start will be given a higher score. The score of matches positioned far from the start of the suggestion decays faster than linear.
 `position_exponential_reciprocal`:::
-`weightFieldValue / pow(1 + position,exponent)`: Matches to the start will be given a score which decay faster than reciprocal.
+`weightFieldValue / pow(1 + position,exponent)`: Matches to the start will be given a higher score. The score of matches positioned far from the start of the suggestion decays faster than reciprocal.
 `exponent`::::
-An optional configuration variable for `position_reciprocal` to control how fast the score will decrease. Default `2.0`.
+An optional configuration variable for `position_exponential_reciprocal` to control how fast the score will decrease. Default `2.0`.
 
 `numFactor`::
 The factor to multiply the number of searched elements from which results will be pruned. Default is `10`.
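
Taken together, the three blender types reduce to simple functions of the
position of the first matching token. A standalone sketch of the documented
formulas (method names are illustrative; the real computation lives inside
BlendedInfixSuggester):

  // Positional coefficient per blender type, per the formulas above.
  static double positionLinear(int position) {
    return 1 - 0.10 * position;                    // position_linear (default)
  }
  static double positionReciprocal(int position) {
    return 1.0 / (1 + position);                   // position_reciprocal
  }
  static double positionExponentialReciprocal(int position, double exponent) {
    return 1.0 / Math.pow(1 + position, exponent); // position_exponential_reciprocal
  }

The suggestion score is weightFieldValue multiplied by the chosen coefficient,
so matches closer to the start of the suggestion always score at least as high
as later ones.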


[38/43] lucene-solr:jira/http2: SOLR-12361: ref guide changes & CHANGES.txt organization

Posted by da...@apache.org.
SOLR-12361: ref guide changes & CHANGES.txt organization


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6e8c05f6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6e8c05f6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6e8c05f6

Branch: refs/heads/jira/http2
Commit: 6e8c05f6fe083544fb7f8fdd01df08ac54d7742e
Parents: 41e972e
Author: David Smiley <ds...@apache.org>
Authored: Wed Sep 12 17:34:28 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Wed Sep 12 17:34:28 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                | 45 ++++++------
 .../src/uploading-data-with-index-handlers.adoc | 75 +++++++++++++-------
 2 files changed, 74 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6e8c05f6/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index b0da693..113ca13 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -131,9 +131,30 @@ New Features
 
 * SOLR-12474: Add an UpdateRequest Object that implements RequestWriter.ContentWriter (noble)
 
+* SOLR-12361: Allow nested child documents to be in field values of a SolrInputDocument as an alternative to
+  add/get ChildDocuments off to the side.  The latter is now referred to as "anonymous" child documents as opposed to
+  "labelled" (by the field name).  Anonymous child docs might be deprecated in the future.  This is an internal change
+  that should work for javabin/SolrJ; separate issues will address XML & JSON formats populating nested docs in this way.
+  AddUpdateCommand and its relationship with DirectUpdateHandler2 were reworked substantially. (Moshe Bla, David Smiley)
+
 * SOLR-12362: Uploading docs in JSON now supports child documents as field values, thus providing a label to the
-  relationship instead of the current "anonymous" relationship.  Use of this experimental feature requires
-  anonChildDocs=false parameter.  (Moshe Bla, David Smiley)
+  relationship instead of the current "anonymous" relationship.  Use of this experimental feature sometimes requires an
+  anonChildDocs=false parameter until Solr 8 due to syntax ambiguities.  (Moshe Bla, David Smiley)
+
+* SOLR-12485: Uploading docs in XML now supports child documents as field values, thus providing a label to the
+  relationship instead of the current "anonymous" relationship. (Moshe Bla, David Smiley)
+
+* SOLR-12441: (EXPERIMENTAL) New NestedUpdateProcessorFactory (URP) to populate special fields _nest_parent_ and
+  _nest_path_ of nested (child) documents. It will generate a uniqueKey of nested docs if they were blank too.
+  (Moshe Bla, David Smiley)
+
+* SOLR-12519: The [child] transformer now returns a nested child doc structure (attached as fields if provided this way)
+  provided the schema has the _nest_path_ field.  This is part of a broader enhancement of nested docs.
+  (Moshe Bla, David Smiley)
+
+* SOLR-12722: The [child] transformer now takes an 'fl' param to specify which fields to return.  It will evaluate
+  doc transformers if present.  In 7.5 a missing 'fl' defaults to the current behavior of all fields, but in 8.0
+  defaults to the top/request "fl". (Moshe Bla, David Smiley)
 
 * SOLR-11578: Solr 7 Admin UI (Cloud > Graph) should reflect the Replica type to give a more accurate representation
   of the cluster. (Rhoit Singh via Erick Erickson)
@@ -155,9 +176,6 @@ New Features
 
 * SOLR-12495: An #EQUAL function for replica in autoscaling policy to equally distribute replicas (noble)
 
-* SOLR-12441: New NestedUpdateProcessorFactory (URP) to populate special fields _nest_parent_ and _nest_path_ of nested
-  (child) documents.  It will generate a uniqueKey of nested docs if they were blank too. (Moshe Bla, David Smiley)
-
 * SOLR-11986: Allow percentage in freedisk attribute in autoscaling policy rules (noble)
 
 * SOLR-12522: Support a runtime function `#ALL` for 'replica' in autoscaling policies (noble)
@@ -185,16 +203,9 @@ New Features
 
 * SOLR-12592: support #EQUAL function, range operator, decimal and percentage in cores in autoscaling policies (noble)
 
-* SOLR-12485: Uploading docs in XML now supports child documents as field values, thus providing a label to the
-  relationship instead of the current "anonymous" relationship. (Moshe Bla, David Smiley)
-
 * SOLR-12655: Add Korean morphological analyzer ("nori") to default distribution. This also adds examples
   for configuration in Solr's schema.  (Uwe Schindler)
 
-* SOLR-12519: The [child] transformer now returns a nested child doc structure (attached as fields if provided this way)
-  provided the schema is enabled for nested documents.  This is part of a broader enhancement of nested docs.
-  (Moshe Bla, David Smiley)
-
 * SOLR-11863: Add knnRegress Stream Evaluator to support nearest neighbor regression (Joel Bernstein)
 
 * SOLR-12702: Add zscores Stream Evaluator (Joel Bernstein)
@@ -219,10 +230,6 @@ New Features
 * SOLR-12716: NodeLostTrigger should support deleting replicas from lost nodes by setting preferredOperation=deletenode.
   (shalin)
 
-* SOLR-12722: The [child] transformer now takes an 'fl' param to specify which fields to return.  It will evaluate
-  doc transformers if present.  In 7.5 a missing 'fl' defaults to the current behavior of all fields, but in 8.0
-  defaults to the top/request "fl". (Moshe Bla, David Smiley)
-
 * SOLR-9418: Added a new (experimental) PhrasesIdentificationComponent for identifying potential phrases
   in query input based on overlapping shingles in the index. (Akash Mehta, Trey Grainger, hossman)
 
@@ -381,12 +388,6 @@ Optimizations
 Other Changes
 ----------------------
 
-* SOLR-12361: Allow nested child documents to be in field values of a SolrInputDocument as an alternative to
-  add/get ChildDocuments off to the side.  The latter is now referred to as "anonymous" child documents as opposed to
-  "labelled" (by the field name).  Anonymous child docs might be deprecated in the future.
-  This is an internal change not yet plumbed into /update formats.
-  AddUpdateCommand and it's relationship with DirectUpdateHandler2 was reworked substantially. (Moshe Bla, David Smiley)
-
 * SOLR-12208: Renamed the autoscaling variable 'INDEX.sizeInBytes' to 'INDEX.sizeInGB' (noble)
 
 * SOLR-12523: Improve error reporting and docs regarding Collection backup feature shared-fs requirement (janhoy)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6e8c05f6/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc b/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
index 0f523d8..93ffdc2 100644
--- a/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
+++ b/solr/solr-ref-guide/src/uploading-data-with-index-handlers.adoc
@@ -541,27 +541,57 @@ The `/update/csv` path may be useful for clients sending in CSV formatted update
 
 == Nested Child Documents
 
-Solr indexes nested documents in blocks as a way to model documents containing other documents, such as a blog post parent document and comments as child documents -- or products as parent documents and sizes, colors, or other variations as child documents. At query time, the <<other-parsers.adoc#block-join-query-parsers,Block Join Query Parsers>> can search these relationships. In terms of performance, indexing the relationships between documents may be more efficient than attempting to do joins only at query time, since the relationships are already stored in the index and do not need to be computed.
+Solr supports indexing nested documents such as a blog post parent document and comments as child documents -- or products as parent documents and sizes, colors, or other variations as child documents.
+The parent with all of its children is referred to as a "block", which explains some of the nomenclature of related features.
+At query time, the <<other-parsers.adoc#block-join-query-parsers,Block Join Query Parsers>> can search these relationships,
+ and the `[child]` <<transforming-result-documents.adoc#transforming-result-documents,Document Transformer>> can attach child documents to the result documents.
+In terms of performance, indexing the relationships between documents usually yields much faster queries than an equivalent "query time join",
+ since the relationships are already stored in the index and do not need to be computed.
+However, nested documents are less flexible than query time joins, as they impose rules that some applications may not be able to accept.
 
-Nested documents may be indexed via either the XML or JSON data syntax (or using <<using-solrj.adoc#using-solrj,SolrJ)>> - but regardless of syntax, you must include a field that identifies the parent document as a parent; it can be any field that suits this purpose, and it will be used as input for the <<other-parsers.adoc#block-join-query-parsers,block join query parsers>>.
+.Note
+[NOTE]
+====
+A big limitation is that the whole block of parent-child documents must be updated or deleted together, not separately.
+In other words, even if a single child document or the parent document is changed, the whole block of parent-child documents must be reindexed together.
+_Solr does not enforce this rule_; if it's violated, you may get sporadic query failures or incorrect results.
+====
+
+Nested documents may be indexed via either the XML or JSON data syntax, and are also supported by <<using-solrj.adoc#using-solrj,SolrJ>> with javabin.
 
-To support nested documents, the schema must include an indexed/non-stored field `\_root_`. The value of that field is populated automatically and is the same for all documents in the block, regardless of the inheritance depth.
+=== Schema Notes
+
+ * The schema must include an indexed, non-stored field `\_root_`. The value of that field is populated automatically and is the same for all documents in the block, regardless of the inheritance depth.
+ * Nested documents are very much documents in their own right even if certain nested documents hold different information from the parent.
+   Therefore:
+ ** the schema must be able to represent the fields of any document
+ ** it may be infeasible to use `required`
+ ** even child documents need a unique `id`
+ * You must include a field that identifies the parent document as a parent; it can be any field that suits this purpose, and it will be used as input for the <<other-parsers.adoc#block-join-query-parsers,block join query parsers>>.
+ * If you associate a child document as a field (e.g. comment), that field need not be defined in the schema, and probably
+   shouldn't be, as that would be confusing.  There is no child document field type.
 
 === XML Examples
 
-For example, here are two documents and their child documents:
+For example, here are two documents and their child documents.
+It illustrates two styles of adding child documents: the first is associated via a field "comment" (preferred),
+and the second is done in the classic way, now referred to as an "anonymous" or "unlabelled" child document.
+The field-label relationship is available to the URP chain in Solr but is ultimately discarded;
+Solr 8 will preserve the relationship.
 
 [source,xml]
 ----
 <add>
   <doc>
-  <field name="id">1</field>
-  <field name="title">Solr adds block join support</field>
-  <field name="content_type">parentDocument</field>
-    <doc>
-      <field name="id">2</field>
-      <field name="comments">SolrCloud supports it too!</field>
-    </doc>
+    <field name="id">1</field>
+    <field name="title">Solr adds block join support</field>
+    <field name="content_type">parentDocument</field>
+    <field name="content">
+      <doc>
+        <field name="id">2</field>
+        <field name="comments">SolrCloud supports it too!</field>
+      </doc>
+    </field>
   </doc>
   <doc>
     <field name="id">3</field>
@@ -575,11 +605,15 @@ For example, here are two documents and their child documents:
 </add>
 ----
 
-In this example, we have indexed the parent documents with the field `content_type`, which has the value "parentDocument". We could have also used a boolean field, such as `isParent`, with a value of "true", or any other similar approach.
+In this example, we have indexed the parent documents with the field `content_type`, which has the value "parentDocument".
+We could have also used a boolean field, such as `isParent`, with a value of "true", or any other similar approach.
 
 === JSON Examples
 
-This example is equivalent to the XML example above, note the special `\_childDocuments_` key need to indicate the nested documents in JSON.
+This example is equivalent to the XML example above.
+Again, the labelled field relationship is preferred.
+The labelled relationship here holds a single child document, but it could have been wrapped in array brackets.
+For the anonymous relationship, note the special `\_childDocuments_` key whose contents must be an array of child documents.
 
 [source,json]
 ----
@@ -588,12 +622,10 @@ This example is equivalent to the XML example above, note the special `\_childDo
     "id": "1",
     "title": "Solr adds block join support",
     "content_type": "parentDocument",
-    "_childDocuments_": [
-      {
-        "id": "2",
-        "comments": "SolrCloud supports it too!"
-      }
-    ]
+    "comment": {
+      "id": "2",
+      "comments": "SolrCloud supports it too!"
+    }
   },
   {
     "id": "3",
@@ -609,8 +641,3 @@ This example is equivalent to the XML example above, note the special `\_childDo
 ]
 ----
 
-.Note
-[NOTE]
-====
-One limitation of indexing nested documents is that the whole block of parent-children documents must be updated together whenever any changes are required. In other words, even if a single child document or the parent document is changed, the whole block of parent-child documents must be indexed together.
-====
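
As a companion to the XML and JSON examples, the same labelled relationship can
be expressed through SolrJ, since SOLR-12361 allows child documents to be field
values of a SolrInputDocument. A hedged sketch (the collection name and the
SolrClient variable are assumptions for illustration):

  // Parent document with a labelled child attached under the "comment" field.
  SolrInputDocument parent = new SolrInputDocument();
  parent.addField("id", "1");
  parent.addField("title", "Solr adds block join support");
  parent.addField("content_type", "parentDocument");

  SolrInputDocument child = new SolrInputDocument();
  child.addField("id", "2");
  child.addField("comments", "SolrCloud supports it too!");

  parent.addField("comment", child);   // labelled child document as a field value
  client.add("myCollection", parent);  // 'client' is an existing SolrClient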


[07/43] lucene-solr:jira/http2: LUCENE-8483: Scorer cannot have a null Weight

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
index 2667660..21dbfab 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
@@ -25,6 +25,7 @@ import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSourceScorer;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
 
@@ -135,7 +136,7 @@ public class EnumFieldSource extends FieldCacheSource {
       }
 
       @Override
-      public ValueSourceScorer getRangeScorer(LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
+      public ValueSourceScorer getRangeScorer(Weight weight, LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
         Integer lower = stringValueToIntValue(lowerVal);
         Integer upper = stringValueToIntValue(upperVal);
 
@@ -156,7 +157,7 @@ public class EnumFieldSource extends FieldCacheSource {
         final int ll = lower;
         final int uu = upper;
 
-        return new ValueSourceScorer(readerContext, this) {
+        return new ValueSourceScorer(weight, readerContext, this) {
           @Override
           public boolean matches(int doc) throws IOException {
             if (!exists(doc)) return false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index 023aed6..e937370 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -35,15 +35,13 @@ import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.LeafFieldComparator;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.search.grouping.TopGroups;
@@ -469,7 +467,7 @@ public class QueryComponent extends SearchComponent
           }
 
           doc -= currentLeaf.docBase;  // adjust for what segment this is in
-          leafComparator.setScorer(new FakeScorer(doc, score));
+          leafComparator.setScorer(new ScoreAndDoc(doc, score));
           leafComparator.copy(0, doc);
           Object val = comparator.value(0);
           if (null != ft) val = ft.marshalSortValue(val);
@@ -1461,12 +1459,11 @@ public class QueryComponent extends SearchComponent
    *
    * TODO: when SOLR-5595 is fixed, this wont be needed, as we dont need to recompute sort values here from the comparator
    */
-  protected static class FakeScorer extends Scorer {
+  protected static class ScoreAndDoc extends Scorable {
     final int docid;
     final float score;
 
-    FakeScorer(int docid, float score) {
-      super(null);
+    ScoreAndDoc(int docid, float score) {
       this.docid = docid;
       this.score = score;
     }
@@ -1480,25 +1477,5 @@ public class QueryComponent extends SearchComponent
     public float score() throws IOException {
       return score;
     }
-
-    @Override
-    public float getMaxScore(int upTo) throws IOException {
-      return Float.POSITIVE_INFINITY;
-    }
-
-    @Override
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public Weight getWeight() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public Collection<ChildScorable> getChildren() {
-      throw new UnsupportedOperationException();
-    }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
index 54664fb..08867db 100644
--- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
@@ -56,7 +56,6 @@ import org.apache.lucene.search.LeafFieldComparator;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.ScoreMode;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.ArrayUtil;
@@ -434,36 +433,18 @@ public class CollapsingQParserPlugin extends QParserPlugin {
   }
 
 
-  private static class DummyScorer extends Scorer {
+  private static class ScoreAndDoc extends Scorable {
 
     public float score;
     public int docId;
 
-    public DummyScorer() {
-      super(null);
-    }
-
     public float score() {
       return score;
     }
 
-    @Override
-    public float getMaxScore(int upTo) throws IOException {
-      return Float.POSITIVE_INFINITY;
-    }
-
-    public int freq() {
-      return 0;
-    }
-
     public int docID() {
       return docId;
     }
-
-    @Override
-    public DocIdSetIterator iterator() {
-      throw new UnsupportedOperationException();
-    }
   }
 
 
@@ -647,7 +628,7 @@ public class CollapsingQParserPlugin extends QParserPlugin {
 
       int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
       leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
-      DummyScorer dummy = new DummyScorer();
+      ScoreAndDoc dummy = new ScoreAndDoc();
       leafDelegate.setScorer(dummy);
       DocIdSetIterator it = new BitSetIterator(collapsedSet, 0L); // cost is not useful here
       int docId = -1;
@@ -850,7 +831,7 @@ public class CollapsingQParserPlugin extends QParserPlugin {
       collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.field);
       int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
       leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
-      DummyScorer dummy = new DummyScorer();
+      ScoreAndDoc dummy = new ScoreAndDoc();
       leafDelegate.setScorer(dummy);
       DocIdSetIterator it = new BitSetIterator(collapsedSet, 0L); // cost is not useful here
       int globalDoc = -1;
@@ -1022,7 +1003,7 @@ public class CollapsingQParserPlugin extends QParserPlugin {
 
       int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
       leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
-      DummyScorer dummy = new DummyScorer();
+      ScoreAndDoc dummy = new ScoreAndDoc();
       leafDelegate.setScorer(dummy);
       DocIdSetIterator it = new BitSetIterator(collapseStrategy.getCollapsedSet(), 0); // cost is not useful here
       int globalDoc = -1;
@@ -1181,7 +1162,7 @@ public class CollapsingQParserPlugin extends QParserPlugin {
       this.collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.collapseField);
       int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
       leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
-      DummyScorer dummy = new DummyScorer();
+      ScoreAndDoc dummy = new ScoreAndDoc();
       leafDelegate.setScorer(dummy);
       DocIdSetIterator it = new BitSetIterator(collapseStrategy.getCollapsedSet(), 0); // cost is not useful here
       int globalDoc = -1;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/solr/core/src/java/org/apache/solr/search/Filter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/Filter.java b/solr/core/src/java/org/apache/solr/search/Filter.java
index 3af83e2..847ef46 100644
--- a/solr/core/src/java/org/apache/solr/search/Filter.java
+++ b/solr/core/src/java/org/apache/solr/search/Filter.java
@@ -89,7 +89,7 @@ public abstract class Filter extends Query {
   //
 
   @Override
-  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
     return new Weight(this) {
 
       @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java b/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java
index c5c6205..fdcdfc3 100644
--- a/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java
+++ b/solr/core/src/java/org/apache/solr/search/FunctionRangeQuery.java
@@ -24,10 +24,13 @@ import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Weight;
 import org.apache.solr.search.function.ValueSourceRangeFilter;
 
 // This class works as either a normal constant score query, or as a PostFilter using a collector
 public class FunctionRangeQuery extends SolrConstantScoreQuery implements PostFilter {
+
   final ValueSourceRangeFilter rangeFilt;
 
   public FunctionRangeQuery(ValueSourceRangeFilter filter) {
@@ -39,16 +42,19 @@ public class FunctionRangeQuery extends SolrConstantScoreQuery implements PostFi
   @Override
   public DelegatingCollector getFilterCollector(IndexSearcher searcher) {
     Map fcontext = ValueSource.newContext(searcher);
-    return new FunctionRangeCollector(fcontext);
+    Weight weight = rangeFilt.createWeight(searcher, ScoreMode.COMPLETE, 1);
+    return new FunctionRangeCollector(fcontext, weight);
   }
 
   class FunctionRangeCollector extends DelegatingCollector {
     final Map fcontext;
+    final Weight weight;
     ValueSourceScorer scorer;
     int maxdoc;
 
-    public FunctionRangeCollector(Map fcontext) {
+    public FunctionRangeCollector(Map fcontext, Weight weight) {
       this.fcontext = fcontext;
+      this.weight = weight;
     }
 
     @Override
@@ -64,7 +70,7 @@ public class FunctionRangeQuery extends SolrConstantScoreQuery implements PostFi
       super.doSetNextReader(context);
       maxdoc = context.reader().maxDoc();
       FunctionValues dv = rangeFilt.getValueSource().getValues(fcontext, context);
-      scorer = dv.getRangeScorer(context, rangeFilt.getLowerVal(), rangeFilt.getUpperVal(), rangeFilt.isIncludeLower(), rangeFilt.isIncludeUpper());
+      scorer = dv.getRangeScorer(weight, context, rangeFilt.getLowerVal(), rangeFilt.getUpperVal(), rangeFilt.isIncludeLower(), rangeFilt.isIncludeUpper());
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/solr/core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java b/solr/core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
index 1fce97e..9f919ed 100644
--- a/solr/core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
+++ b/solr/core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
@@ -21,7 +21,9 @@ import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.apache.solr.search.BitsFilteredDocIdSet;
 import org.apache.solr.search.SolrFilter;
@@ -77,10 +79,13 @@ public class ValueSourceRangeFilter extends SolrFilter {
 
   @Override
   public DocIdSet getDocIdSet(final Map context, final LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
-     return BitsFilteredDocIdSet.wrap(new DocIdSet() {
+    // NB the IndexSearcher parameter here can be null because Filter Weights don't
+    // actually use it.
+    Weight weight = createWeight(null, ScoreMode.COMPLETE, 1);
+    return BitsFilteredDocIdSet.wrap(new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() throws IOException {
-         Scorer scorer = valueSource.getValues(context, readerContext).getRangeScorer(readerContext, lowerVal, upperVal, includeLower, includeUpper);
+         Scorer scorer = valueSource.getValues(context, readerContext).getRangeScorer(weight, readerContext, lowerVal, upperVal, includeLower, includeUpper);
          return scorer == null ? null : scorer.iterator();
        }
        @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66c671ea/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
index 587a889..a678110 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
@@ -20,7 +20,6 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
@@ -32,7 +31,6 @@ import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LeafCollector;
@@ -41,7 +39,6 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.ScoreMode;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TopDocs;
@@ -424,7 +421,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
             }
 
             doc -= currentLeaf.docBase;  // adjust for what segment this is in
-            leafComparator.setScorer(new FakeScorer(doc, score));
+            leafComparator.setScorer(new ScoreAndDoc(doc, score));
             leafComparator.copy(0, doc);
             Object val = comparator.value(0);
             if (null != ft) val = ft.marshalSortValue(val);
@@ -438,13 +435,12 @@ public class TestRankQueryPlugin extends QParserPlugin {
       }
     }
 
-    private static class FakeScorer extends Scorer {
+    private static class ScoreAndDoc extends Scorable {
 
       final int docid;
       final float score;
 
-      FakeScorer(int docid, float score) {
-        super(null);
+      ScoreAndDoc(int docid, float score) {
         this.docid = docid;
         this.score = score;
       }
@@ -455,29 +451,9 @@ public class TestRankQueryPlugin extends QParserPlugin {
       }
 
       @Override
-      public float score() throws IOException {
+      public float score() {
         return score;
       }
-
-      @Override
-      public float getMaxScore(int upTo) throws IOException {
-        return score;
-      }
-
-      @Override
-      public DocIdSetIterator iterator() {
-        throw new UnsupportedOperationException();
-      }
-
-      @Override
-      public Weight getWeight() {
-        throw new UnsupportedOperationException();
-      }
-
-      @Override
-      public Collection<ChildScorable> getChildren() {
-        throw new UnsupportedOperationException();
-      }
     }
 
     public void merge(ResponseBuilder rb, ShardRequest sreq) {
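
The pattern repeated throughout this commit is the same: collectors that only
need to replay a docID and a precomputed score hand the leaf collector a bare
Scorable rather than a Scorer built with a null Weight, which LUCENE-8483 now
forbids. A hedged usage sketch (variable names are illustrative):

  // Minimal Scorable for pushing precomputed scores into a LeafCollector.
  final class ScoreAndDoc extends Scorable {
    float score;
    int docId;
    @Override public float score() { return score; }
    @Override public int docID() { return docId; }
  }

  ScoreAndDoc cursor = new ScoreAndDoc();
  leafCollector.setScorer(cursor);  // LeafCollector now accepts any Scorable
  cursor.docId = doc;
  cursor.score = precomputedScore;
  leafCollector.collect(doc);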


[14/43] lucene-solr:jira/http2: LUCENE-7862: Store the real bounds of the leaf cells in the BKD index when the number of dimensions is bigger than 1

Posted by da...@apache.org.
LUCENE-7862: Store the real bounds of the leaf cells in the BKD index when the number of dimensions is bigger than 1


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f406ff91
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f406ff91
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f406ff91

Branch: refs/heads/jira/http2
Commit: f406ff91a8912f13a7652a2802084db1c0da5830
Parents: 3b62f23
Author: iverase <iv...@apache.org>
Authored: Sat Sep 8 15:42:30 2018 +0200
Committer: iverase <iv...@apache.org>
Committed: Sat Sep 8 15:42:30 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  4 ++
 .../org/apache/lucene/util/bkd/BKDReader.java   | 58 +++++++++++++++++---
 .../org/apache/lucene/util/bkd/BKDWriter.java   | 56 ++++++++++++++++---
 .../document/FloatPointNearestNeighbor.java     |  2 +-
 .../apache/lucene/search/NearestNeighbor.java   |  2 +-
 5 files changed, 103 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f406ff91/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 07163b3..846554e 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -298,6 +298,10 @@ Improvements
 
 * LUCENE-8422: IntervalQuery now returns useful Matches (Alan Woodward)
 
+* LUCENE-7862: Store the real bounds of the leaf cells in the BKD index when the
+  number of dimensions is bigger than 1. It improves performance when there is
+  correlation between the dimensions, for example ranges. (Ignacio Vera, Adrien Grand)
+
 Other:
 
 * LUCENE-8485: Update randomizedtesting to version 2.6.4. (Dawid Weiss)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f406ff91/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
index f6a9bf4..00a1d7d 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
@@ -325,7 +325,7 @@ public final class BKDReader extends PointValues implements Accountable {
   public static final class IntersectState {
     final IndexInput in;
     final int[] scratchDocIDs;
-    final byte[] scratchPackedValue;
+    final byte[] scratchPackedValue1, scratchPackedValue2;
     final int[] commonPrefixLengths;
 
     final IntersectVisitor visitor;
@@ -340,7 +340,8 @@ public final class BKDReader extends PointValues implements Accountable {
       this.visitor = visitor;
       this.commonPrefixLengths = new int[numDims];
       this.scratchDocIDs = new int[maxPointsInLeafNode];
-      this.scratchPackedValue = new byte[packedBytesLength];
+      this.scratchPackedValue1 = new byte[packedBytesLength];
+      this.scratchPackedValue2 = new byte[packedBytesLength];
       this.index = indexVisitor;
     }
   }
@@ -402,7 +403,7 @@ public final class BKDReader extends PointValues implements Accountable {
     int count = readDocIDs(state.in, index.getLeafBlockFP(), state.scratchDocIDs);
 
     // Again, this time reading values and checking with the visitor
-    visitDocValues(state.commonPrefixLengths, state.scratchPackedValue, state.in, state.scratchDocIDs, count, state.visitor);
+    visitDocValues(state.commonPrefixLengths, state.scratchPackedValue1, state.scratchPackedValue2, state.in, state.scratchDocIDs, count, state.visitor);
   }
 
   private void visitDocIDs(IndexInput in, long blockFP, IntersectVisitor visitor) throws IOException {
@@ -427,20 +428,59 @@ public final class BKDReader extends PointValues implements Accountable {
     return count;
   }
 
-  void visitDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
-    visitor.grow(count);
+  void visitDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue1, byte[] scratchPackedValue2, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
+
+
+    readCommonPrefixes(commonPrefixLengths, scratchPackedValue1, in);
+
+    if (numDims != 1 && version >= BKDWriter.VERSION_LEAF_STORES_BOUNDS) {
+      byte[] minPackedValue = scratchPackedValue1;
+      byte[] maxPackedValue = scratchPackedValue2;
+      //Copy common prefixes before reading adjusted
+      // box
+      System.arraycopy(minPackedValue, 0, maxPackedValue, 0, packedBytesLength);
+      readMinMax(commonPrefixLengths, minPackedValue, maxPackedValue, in);
+
+      // The index gives us range of values for each dimension, but the actual range of values
+      // might be much more narrow than what the index told us, so we double check the relation
+      // here, which is cheap yet might help figure out that the block either entirely matches
+      // or does not match at all. This is especially more likely in the case that there are
+      // multiple dimensions that have correlation, ie. splitting on one dimension also
+      // significantly changes the range of values in another dimension.
+      Relation r = visitor.compare(minPackedValue, maxPackedValue);
+      if (r == Relation.CELL_OUTSIDE_QUERY) {
+        return;
+      }
+      visitor.grow(count);
+
+      if (r == Relation.CELL_INSIDE_QUERY) {
+        for (int i = 0; i < count; ++i) {
+          visitor.visit(docIDs[i]);
+        }
+        return;
+      }
+    } else {
+      visitor.grow(count);
+    }
 
-    readCommonPrefixes(commonPrefixLengths, scratchPackedValue, in);
 
     int compressedDim = readCompressedDim(in);
 
     if (compressedDim == -1) {
-      visitRawDocValues(commonPrefixLengths, scratchPackedValue, in, docIDs, count, visitor);
+      visitRawDocValues(commonPrefixLengths, scratchPackedValue1, in, docIDs, count, visitor);
     } else {
-      visitCompressedDocValues(commonPrefixLengths, scratchPackedValue, in, docIDs, count, visitor, compressedDim);
+      visitCompressedDocValues(commonPrefixLengths, scratchPackedValue1, in, docIDs, count, visitor, compressedDim);
     }
   }
 
+    private void readMinMax(int[] commonPrefixLengths, byte[] minPackedValue, byte[] maxPackedValue, IndexInput in) throws IOException {
+      for (int dim = 0; dim < numDims; dim++) {
+        int prefix = commonPrefixLengths[dim];
+        in.readBytes(minPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
+        in.readBytes(maxPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
+      }
+    }
+
   // Just read suffixes for every dimension
   private void visitRawDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
     for (int i = 0; i < count; ++i) {
@@ -521,7 +561,7 @@ public final class BKDReader extends PointValues implements Accountable {
         int count = readDocIDs(state.in, state.index.getLeafBlockFP(), state.scratchDocIDs);
 
         // Again, this time reading values and checking with the visitor
-        visitDocValues(state.commonPrefixLengths, state.scratchPackedValue, state.in, state.scratchDocIDs, count, state.visitor);
+        visitDocValues(state.commonPrefixLengths, state.scratchPackedValue1, state.scratchPackedValue2, state.in, state.scratchDocIDs, count, state.visitor);
       }
 
     } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f406ff91/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
index a8b9813..014d470 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
@@ -39,6 +39,7 @@ import org.apache.lucene.store.RAMOutputStream;
 import org.apache.lucene.store.TrackingDirectoryWrapper;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.BytesRefComparator;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.FutureArrays;
@@ -83,7 +84,9 @@ public class BKDWriter implements Closeable {
 
   public static final String CODEC_NAME = "BKD";
   public static final int VERSION_START = 4; // version used by Lucene 7.0
-  public static final int VERSION_CURRENT = VERSION_START;
+  //public static final int VERSION_CURRENT = VERSION_START;
+  public static final int VERSION_LEAF_STORES_BOUNDS = 5;
+  public static final int VERSION_CURRENT = VERSION_LEAF_STORES_BOUNDS;
 
   /** How many bytes each docs takes in the fixed-width offline format */
   private final int bytesPerDoc;
@@ -344,16 +347,16 @@ public class BKDWriter implements Closeable {
           docsInBlock = bkd.readDocIDs(state.in, state.in.getFilePointer(), state.scratchDocIDs);
           assert docsInBlock > 0;
           docBlockUpto = 0;
-          bkd.visitDocValues(state.commonPrefixLengths, state.scratchPackedValue, state.in, state.scratchDocIDs, docsInBlock, new IntersectVisitor() {
+          bkd.visitDocValues(state.commonPrefixLengths, state.scratchPackedValue1, state.scratchPackedValue2, state.in, state.scratchDocIDs, docsInBlock, new IntersectVisitor() {
             int i = 0;
 
             @Override
-            public void visit(int docID) throws IOException {
+            public void visit(int docID) {
               throw new UnsupportedOperationException();
             }
 
             @Override
-            public void visit(int docID, byte[] packedValue) throws IOException {
+            public void visit(int docID, byte[] packedValue) {
               assert docID == state.scratchDocIDs[i];
               System.arraycopy(packedValue, 0, packedValues, i * bkd.packedBytesLength, bkd.packedBytesLength);
               i++;
@@ -361,7 +364,7 @@ public class BKDWriter implements Closeable {
 
             @Override
             public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
-              throw new UnsupportedOperationException();
+              return Relation.CELL_CROSSES_QUERY;
             }
 
           });
@@ -382,7 +385,7 @@ public class BKDWriter implements Closeable {
         if (mappedDocID != -1) {
           // Not deleted!
           docID = mappedDocID;
-          System.arraycopy(packedValues, index * bkd.packedBytesLength, state.scratchPackedValue, 0, bkd.packedBytesLength);
+          System.arraycopy(packedValues, index * bkd.packedBytesLength, state.scratchPackedValue1, 0, bkd.packedBytesLength);
           return true;
         }
       }
@@ -401,7 +404,7 @@ public class BKDWriter implements Closeable {
     public boolean lessThan(MergeReader a, MergeReader b) {
       assert a != b;
 
-      int cmp = FutureArrays.compareUnsigned(a.state.scratchPackedValue, 0, bytesPerDim, b.state.scratchPackedValue, 0, bytesPerDim);
+      int cmp = FutureArrays.compareUnsigned(a.state.scratchPackedValue1, 0, bytesPerDim, b.state.scratchPackedValue1, 0, bytesPerDim);
       if (cmp < 0) {
         return true;
       } else if (cmp > 0) {
@@ -543,7 +546,7 @@ public class BKDWriter implements Closeable {
       MergeReader reader = queue.top();
       // System.out.println("iter reader=" + reader);
 
-      oneDimWriter.add(reader.state.scratchPackedValue, reader.docID);
+      oneDimWriter.add(reader.state.scratchPackedValue1, reader.docID);
 
       if (reader.next()) {
         queue.updateTop();
@@ -1272,6 +1275,9 @@ public class BKDWriter implements Closeable {
       // all values in this block are equal
       out.writeByte((byte) -1);
     } else {
+      if (numDims != 1) {
+        writeActualBounds(out, commonPrefixLengths, count, packedValues);
+      }
       assert commonPrefixLengths[sortedDim] < bytesPerDim;
       out.writeByte((byte) sortedDim);
       int compressedByteOffset = sortedDim * bytesPerDim + commonPrefixLengths[sortedDim];
@@ -1291,6 +1297,40 @@ public class BKDWriter implements Closeable {
     }
   }
 
+  private void writeActualBounds(DataOutput out, int[] commonPrefixLengths, int count, IntFunction<BytesRef> packedValues) throws IOException {
+    for (int dim = 0; dim < numDims; ++dim) {
+      int commonPrefixLength = commonPrefixLengths[dim];
+      int suffixLength = bytesPerDim - commonPrefixLength;
+      if (suffixLength > 0) {
+        BytesRef[] minMax = computeMinMax(count, packedValues, dim * bytesPerDim + commonPrefixLength, suffixLength);
+        BytesRef min = minMax[0];
+        BytesRef max = minMax[1];
+        out.writeBytes(min.bytes, min.offset, min.length);
+        out.writeBytes(max.bytes, max.offset, max.length);
+      }
+    }
+  }
+
+  /** Return an array that contains the min and max values for the [offset, offset+length] interval
+   *  of the given {@link BytesRef}s. */
+  private static BytesRef[] computeMinMax(int count, IntFunction<BytesRef> packedValues, int offset, int length) {
+    assert length > 0;
+    BytesRefBuilder min = new BytesRefBuilder();
+    BytesRefBuilder max = new BytesRefBuilder();
+    BytesRef first = packedValues.apply(0);
+    min.copyBytes(first.bytes, first.offset + offset, length);
+    max.copyBytes(first.bytes, first.offset + offset, length);
+    for (int i = 1; i < count; ++i) {
+      BytesRef candidate = packedValues.apply(i);
+      if (FutureArrays.compareUnsigned(min.bytes(), 0, length, candidate.bytes, candidate.offset + offset, candidate.offset + offset + length) > 0) {
+        min.copyBytes(candidate.bytes, candidate.offset + offset, length);
+      } else if (FutureArrays.compareUnsigned(max.bytes(), 0, length, candidate.bytes, candidate.offset + offset, candidate.offset + offset + length) < 0) {
+        max.copyBytes(candidate.bytes, candidate.offset + offset, length);
+      }
+    }
+    return new BytesRef[]{min.get(), max.get()};
+  }
+
   private void writeLeafBlockPackedValuesRange(DataOutput out, int[] commonPrefixLengths, int start, int end, IntFunction<BytesRef> packedValues) throws IOException {
     for (int i = start; i < end; ++i) {
       BytesRef ref = packedValues.apply(i);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f406ff91/lucene/sandbox/src/java/org/apache/lucene/document/FloatPointNearestNeighbor.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/document/FloatPointNearestNeighbor.java b/lucene/sandbox/src/java/org/apache/lucene/document/FloatPointNearestNeighbor.java
index 8458746..38a4103 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/document/FloatPointNearestNeighbor.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/document/FloatPointNearestNeighbor.java
@@ -190,7 +190,7 @@ public class FloatPointNearestNeighbor {
 
     @Override
     public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
-      throw new AssertionError();
+      return PointValues.Relation.CELL_CROSSES_QUERY;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f406ff91/lucene/sandbox/src/java/org/apache/lucene/search/NearestNeighbor.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/NearestNeighbor.java b/lucene/sandbox/src/java/org/apache/lucene/search/NearestNeighbor.java
index 8502c45..449d37d 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/NearestNeighbor.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/NearestNeighbor.java
@@ -178,7 +178,7 @@ class NearestNeighbor {
 
     @Override
     public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
-      throw new AssertionError();
+      return Relation.CELL_CROSSES_QUERY;
     }
   }
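
The computeMinMax helper above is an unsigned-lexicographic scan over the
suffix bytes of each packed value. The same idea in a self-contained sketch
(using java.util.Arrays.compareUnsigned from Java 9+ in place of Lucene's
FutureArrays shim for Java 8):

  // Unsigned-lexicographic min and max over equal-length byte arrays.
  static byte[][] minMax(java.util.List<byte[]> values) {
    byte[] min = values.get(0);
    byte[] max = values.get(0);
    for (byte[] candidate : values) {
      if (java.util.Arrays.compareUnsigned(candidate, min) < 0) min = candidate;
      if (java.util.Arrays.compareUnsigned(candidate, max) > 0) max = candidate;
    }
    return new byte[][] {min, max};
  }

At search time the stored [min, max] box lets the reader short-circuit a leaf:
CELL_OUTSIDE_QUERY skips it entirely, CELL_INSIDE_QUERY visits every docID
without decoding values, and only CELL_CROSSES_QUERY pays for per-value checks.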
 


[22/43] lucene-solr:jira/http2: Add 7.6.0 version

Posted by da...@apache.org.
Add 7.6.0 version


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3a71bf39
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3a71bf39
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3a71bf39

Branch: refs/heads/jira/http2
Commit: 3a71bf39d1af2e62d5a84b5a3a7daf6a874ce164
Parents: 1f2b344
Author: Jim Ferenczi <ji...@apache.org>
Authored: Mon Sep 10 16:05:53 2018 +0200
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Mon Sep 10 16:05:53 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                  |  3 +++
 .../src/java/org/apache/lucene/util/Version.java    |  7 +++++++
 solr/CHANGES.txt                                    | 16 ++++++++++++++++
 3 files changed, 26 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3a71bf39/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 846554e..a9b97a7 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -160,6 +160,9 @@ Optimizations
 * LUCENE-8448: Boolean queries now propagate the minimum score to their sub-scorers.
   (Jim Ferenczi, Adrien Grand)
 
+======================= Lucene 7.6.0 =======================
+(No Changes)
+
 ======================= Lucene 7.5.0 =======================
 
 API Changes:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3a71bf39/lucene/core/src/java/org/apache/lucene/util/Version.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/Version.java b/lucene/core/src/java/org/apache/lucene/util/Version.java
index 80368da..808e56b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Version.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Version.java
@@ -97,6 +97,13 @@ public final class Version {
   public static final Version LUCENE_7_5_0 = new Version(7, 5, 0);
 
   /**
+   * Match settings and bugs in Lucene's 7.6.0 release.
+   * @deprecated Use latest
+   */
+  @Deprecated
+  public static final Version LUCENE_7_6_0 = new Version(7, 6, 0);
+
+  /**
    * Match settings and bugs in Lucene's 8.0.0 release.
    * <p>
    * Use this to get the latest &amp; greatest settings, bug

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3a71bf39/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 797acfc..0d71204 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -71,6 +71,22 @@ Other Changes
   java.time.DateTimeFormatter instead of Joda time (see upgrade notes).  "Lenient" is enabled.  Removed Joda Time dependency.
   (David Smiley, Bar Rotstein)
 
+==================  7.6.0 ==================
+
+Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
+
+Versions of Major Components
+---------------------
+Apache Tika 1.18
+Carrot2 3.16.0
+Velocity 1.7 and Velocity Tools 2.0
+Apache ZooKeeper 3.4.11
+Jetty 9.4.11.v20180605
+
+
+(No Changes)
+
+
 ==================  7.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


[41/43] lucene-solr:jira/http2: SOLR-12766: Backoff time for internal requests is never more than 2 seconds

Posted by da...@apache.org.
SOLR-12766: Backoff time for internal requests is never more than 2 seconds


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9f37a6be
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9f37a6be
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9f37a6be

Branch: refs/heads/jira/http2
Commit: 9f37a6be9bea011a98d769692560ea41d6fb3d08
Parents: dea3d69
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Thu Sep 13 08:43:10 2018 -0700
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Thu Sep 13 08:43:10 2018 -0700

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f37a6be/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
index 3a65f17..d7388f0 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
@@ -170,7 +170,7 @@ public class SolrCmdDistributor implements Closeable {
     if (resubmitList.size() > 0) {
       // Only backoff once for the full batch
       try {
-        int backoffTime = retryPause * resubmitList.get(0).req.retries;
+        int backoffTime = Math.min(retryPause * resubmitList.get(0).req.retries, 2000);
         log.debug("Sleeping {}ms before re-submitting {} requests", backoffTime, resubmitList.size());
         Thread.sleep(backoffTime);
       } catch (InterruptedException e) {

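For context, the capped backoff in isolation: a runnable sketch where retryPause
and the retry counter stand in for the SolrCmdDistributor fields, and the 500 ms
base pause is an assumed value rather than the shipped default:

    public class BackoffDemo {
      public static void main(String[] args) {
        int retryPause = 500; // assumed base pause in ms
        for (int retries = 1; retries <= 10; retries++) {
          // linear backoff, now clamped to 2 seconds by the fix above
          int backoffTime = Math.min(retryPause * retries, 2000);
          System.out.println("retry " + retries + " -> sleep " + backoffTime + " ms");
        }
      }
    }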

[35/43] lucene-solr:jira/http2: Fix typo in build.xml so PDF javadoc links get converted correctly

Posted by da...@apache.org.
Fix typo in build.xml so PDF javadoc links get converted correctly


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ad7f15d8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ad7f15d8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ad7f15d8

Branch: refs/heads/jira/http2
Commit: ad7f15d808232572c8755967559f440c742a2352
Parents: 5b96f89
Author: Cassandra Targett <ct...@apache.org>
Authored: Wed Sep 12 15:03:43 2018 -0500
Committer: Cassandra Targett <ct...@apache.org>
Committed: Wed Sep 12 15:03:43 2018 -0500

----------------------------------------------------------------------
 solr/solr-ref-guide/build.xml | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ad7f15d8/solr/solr-ref-guide/build.xml
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/build.xml b/solr/solr-ref-guide/build.xml
index 08fbc40..fb6a36a 100644
--- a/solr/solr-ref-guide/build.xml
+++ b/solr/solr-ref-guide/build.xml
@@ -101,7 +101,7 @@
   <!-- for the PDF guide, we always use absolute javadoc urls -->
   <property name="pdf-solr-javadocs" value="https://lucene.apache.org/solr/${solr-docs-version-path}/" />
   <property name="pdf-lucene-javadocs" value="https://lucene.apache.org/core/${solr-docs-version-path}/" />
-  
+
   <property name="build.content.dir" location="${build.dir}/content" />
   <property name="main-page" value="index" />
   <property name="pdf-filename" value="apache-solr-ref-guide-${solr-guide-version}.pdf" />
@@ -192,7 +192,7 @@
     <attribute name="outputDirectory"/>
     <attribute name="backend"/>
     <attribute name="solr-javadocs" default="${pdf-solr-javadocs}" />
-    <attribute name="lucene-javadocs" default="#{pdf-lucene-javadocs}" />
+    <attribute name="lucene-javadocs" default="${pdf-lucene-javadocs}" />
     <attribute name="headerFooter" default="true" />
     <sequential>
       <!-- NOTE: we have our own variant on the asciidoctor-ant task, so that sourceDocumentName=""
@@ -247,7 +247,7 @@
     </sequential>
   </macrodef>
 
-  
+
   <!-- ====== PDF Build ======= -->
   <target name="build-pdf" depends="bare-bones-html-validation,-build-pdf-and-reduce-pdf"
           description="Builds the PDF (after building &amp; validating a bare-bones html version)" />
@@ -265,7 +265,7 @@
                          />
     <move file="${build.dir}/pdf-tmp/SolrRefGuide-all.pdf" tofile="${build.dir}/pdf-tmp/RAW-${pdf-filename}" />
   </target>
-  
+
   <target name="-reduce-pdf-size" depends="build-init,build-tools-jar">
     <java classname="ReducePDFSize"
           failonerror="true"
@@ -300,16 +300,16 @@
       <arg value="build"/>
     </exec>
   </target>
-  
+
   <!-- ======= HTML Bare Bones Conversion =======
       Does a very raw conversion of the adoc files to HTML for the purpose of link & anchor checking
-       
+
        Unlike the "HTML Site Build" above, this does *NOT* require Jekyll, and can be done entirely
        With ivy deps fetched automatically (just like the PDF)
        -->
   <target name="bare-bones-html-validation" depends="build-init,build-nav-data-files"
           description="Builds (w/o Jekyll) a very simple html version of the guide and runs link/anchor validation on it">
-    
+
     <delete dir="${build.dir}/bare-bones-html"/>
     <mkdir dir="${build.dir}/bare-bones-html"/>
     <asciidoctor-convert sourceDirectory="${build.content.dir}"
@@ -341,6 +341,6 @@
     <echo>SITE: ${build.dir}/html-site/${main-page}.html</echo>
   </target>
 
-  
-  
+
+
 </project>


[11/43] lucene-solr:jira/http2: SOLR-11943: Change location... to latlon...

Posted by da...@apache.org.
SOLR-11943: Change location... to latlon...


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f5ce384f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f5ce384f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f5ce384f

Branch: refs/heads/jira/http2
Commit: f5ce384fb8e0c44f833344727740d6e92753417c
Parents: 2c88922
Author: Joel Bernstein <jb...@apache.org>
Authored: Fri Sep 7 15:42:03 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Fri Sep 7 15:42:34 2018 -0400

----------------------------------------------------------------------
 .../org/apache/solr/client/solrj/io/Lang.java   |   2 +-
 .../solrj/io/eval/LatLonVectorsEvaluator.java   | 115 +++++++++++++++++++
 .../solrj/io/eval/LocationVectorsEvaluator.java | 105 -----------------
 .../apache/solr/client/solrj/io/TestLang.java   |   2 +-
 .../solrj/io/stream/MathExpressionTest.java     |   4 +-
 5 files changed, 119 insertions(+), 109 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5ce384f/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
index 9568ecb..3f24e7b 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
@@ -255,7 +255,7 @@ public class Lang {
         .withFunctionName("removeCache", RemoveCacheEvaluator.class)
         .withFunctionName("listCache", ListCacheEvaluator.class)
         .withFunctionName("zscores", NormalizeEvaluator.class)
-        .withFunctionName("locationVectors", LocationVectorsEvaluator.class)
+        .withFunctionName("latlonVectors", LatLonVectorsEvaluator.class)
 
         // Boolean Stream Evaluators
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5ce384f/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LatLonVectorsEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LatLonVectorsEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LatLonVectorsEvaluator.java
new file mode 100644
index 0000000..de8168a
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LatLonVectorsEvaluator.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.ArrayList;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+/**
+ * The LatLonVectorsEvaluator maps to the latlonVectors Math Expression. The latlonVectors expression
+ * takes as input a list of Tuples that contain a lat,lon point field, plus a named parameter called
+ * field that specifies which field of the Tuples to parse the lat,lon vectors from.
+ *
+ * The latlonVectors function returns a matrix of lat,lon vectors. Each row in the matrix
+ * contains a single lat,lon pair.
+ *
+ **/
+
+public class LatLonVectorsEvaluator extends RecursiveObjectEvaluator implements ManyValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  private String field;
+
+  public LatLonVectorsEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+
+    List<StreamExpressionNamedParameter> namedParams = factory.getNamedOperands(expression);
+
+    for (StreamExpressionNamedParameter namedParam : namedParams) {
+      if(namedParam.getName().equals("field")) {
+        this.field = namedParam.getParameter().toString();
+      } else {
+        throw new IOException("Unexpected named parameter:" + namedParam.getName());
+      }
+    }
+
+    if(field == null) {
+      throw new IOException("The named parameter \"field\" must be set for the latlonVectors function.");
+    }
+  }
+
+  @Override
+  public Object doWork(Object... objects) throws IOException {
+
+    if (objects.length == 1) {
+      //Just docs
+      if(!(objects[0] instanceof List)) {
+        throw new IOException("The latlonVectors function expects a list of Tuples as a parameter.");
+      } else {
+        List list = (List)objects[0];
+        if(list.size() > 0) {
+          Object o = list.get(0);
+          if(!(o instanceof Tuple)) {
+            throw new IOException("The latlonVectors function expects a list of Tuples as a parameter.");
+          }
+        } else {
+          throw new IOException("Empty list was passed as a parameter to termVectors function.");
+        }
+      }
+
+      List<Tuple> tuples = (List<Tuple>) objects[0];
+
+      double[][] locationVectors = new double[tuples.size()][2];
+      List<String> features = new ArrayList<>();
+      features.add("lat");
+      features.add("lon");
+
+      List<String> rowLabels = new ArrayList<>();
+
+      for(int i=0; i< tuples.size(); i++) {
+        Tuple tuple = tuples.get(i);
+        String value = tuple.getString(field);
+        String[] latLong = null;
+        if(value.contains(",")) {
+          latLong = value.split(",");
+        } else {
+          latLong = value.split(" ");
+        }
+
+        locationVectors[i][0] = Double.parseDouble(latLong[0].trim());
+        locationVectors[i][1] = Double.parseDouble(latLong[1].trim());
+        if(tuple.get("id") != null) {
+          rowLabels.add(tuple.get("id").toString());
+        }
+      }
+
+      Matrix matrix = new Matrix(locationVectors);
+      matrix.setColumnLabels(features);
+      matrix.setRowLabels(rowLabels);
+      return matrix;
+    } else {
+      throw new IOException("The latlonVectors function takes a single positional parameter.");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5ce384f/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LocationVectorsEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LocationVectorsEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LocationVectorsEvaluator.java
deleted file mode 100644
index 0c1ba99..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/LocationVectorsEvaluator.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.client.solrj.io.eval;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-
-import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
-import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
-import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
-
-public class LocationVectorsEvaluator extends RecursiveObjectEvaluator implements ManyValueWorker {
-  protected static final long serialVersionUID = 1L;
-
-  private String field;
-
-  public LocationVectorsEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
-    super(expression, factory);
-
-    List<StreamExpressionNamedParameter> namedParams = factory.getNamedOperands(expression);
-
-    for (StreamExpressionNamedParameter namedParam : namedParams) {
-      if(namedParam.getName().equals("field")) {
-        this.field = namedParam.getParameter().toString();
-      } else {
-        throw new IOException("Unexpected named parameter:" + namedParam.getName());
-      }
-    }
-
-    if(field == null) {
-      throw new IOException("The named parameter \"field\" must be set for the locationVectors function.");
-    }
-  }
-
-  @Override
-  public Object doWork(Object... objects) throws IOException {
-
-    if (objects.length == 1) {
-      //Just docs
-      if(!(objects[0] instanceof List)) {
-        throw new IOException("The locationVectors function expects a list of Tuples as a parameter.");
-      } else {
-        List list = (List)objects[0];
-        if(list.size() > 0) {
-          Object o = list.get(0);
-          if(!(o instanceof Tuple)) {
-            throw new IOException("The locationVectors function expects a list of Tuples as a parameter.");
-          }
-        } else {
-          throw new IOException("Empty list was passed as a parameter to termVectors function.");
-        }
-      }
-
-      List<Tuple> tuples = (List<Tuple>) objects[0];
-
-      double[][] locationVectors = new double[tuples.size()][2];
-      List<String> features = new ArrayList();
-      features.add("lat");
-      features.add("long");
-
-      List<String> rowLabels = new ArrayList();
-
-      for(int i=0; i< tuples.size(); i++) {
-        Tuple tuple = tuples.get(i);
-        String value = tuple.getString(field);
-        String[] latLong = null;
-        if(value.contains(",")) {
-          latLong = value.split(",");
-        } else {
-          latLong = value.split(" ");
-        }
-
-        locationVectors[i][0] = Double.parseDouble(latLong[0].trim());
-        locationVectors[i][1] = Double.parseDouble(latLong[1].trim());
-        if(tuple.get("id") != null) {
-          rowLabels.add(tuple.get("id").toString());
-        }
-      }
-
-      Matrix matrix = new Matrix(locationVectors);
-      matrix.setColumnLabels(features);
-      matrix.setRowLabels(rowLabels);
-      return matrix;
-    } else {
-      throw new IOException("The termVectors function takes a single positional parameter.");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5ce384f/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
index ee8c1e1..12ff2e9 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
@@ -70,7 +70,7 @@ public class TestLang extends LuceneTestCase {
       "mod", "ceil", "floor", "sin", "asin", "sinh", "cos", "acos", "cosh", "tan", "atan", "tanh", "round", "sqrt",
       "cbrt", "coalesce", "uuid", "if", "convert", "valueAt", "memset", "fft", "ifft", "euclidean","manhattan",
       "earthMovers", "canberra", "chebyshev", "ones", "zeros", "setValue", "getValue", "knnRegress", "gaussfit",
-      "outliers", "stream", "getCache", "putCache", "listCache", "removeCache", "zscores", "locationVectors"};
+      "outliers", "stream", "getCache", "putCache", "listCache", "removeCache", "zscores", "latlonVectors"};
 
   @Test
   public void testLang() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f5ce384f/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
index 4bcf50d..137add6 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
@@ -312,7 +312,7 @@ public class MathExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
-  public void testLocationFunctions() throws Exception {
+  public void testLatlonFunctions() throws Exception {
     UpdateRequest updateRequest = new UpdateRequest();
 
     int i=0;
@@ -326,7 +326,7 @@ public class MathExpressionTest extends SolrCloudTestCase {
 
     String expr = "let(echo=true," +
         "              a=search("+COLLECTIONORALIAS+", q=*:*, fl=\"id, loc_p, price_i\",rows=100, sort=\"price_i asc\"),"+
-        "              b=locationVectors(a, field=loc_p)," +
+        "              b=latlonVectors(a, field=loc_p)," +
         "              c=distance(array(40.7128, 74.0060), array(45.7128, 74.0060), haversineMeters()))";
 
 

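A self-contained sketch of the parsing loop the new evaluator performs, with the
Tuple and Matrix plumbing stripped away (the sample strings are made up; both the
comma-separated and space-separated forms the evaluator accepts are shown):

    public class LatLonParseDemo {
      public static void main(String[] args) {
        String[] values = {"40.7128,74.0060", "45.7128 74.0060"};
        double[][] vectors = new double[values.length][2];
        for (int i = 0; i < values.length; i++) {
          // split on a comma when present, otherwise on a space, as in LatLonVectorsEvaluator
          String[] latLon = values[i].contains(",")
              ? values[i].split(",")
              : values[i].split(" ");
          vectors[i][0] = Double.parseDouble(latLon[0].trim());
          vectors[i][1] = Double.parseDouble(latLon[1].trim());
        }
        for (double[] row : vectors) {
          System.out.println(row[0] + ", " + row[1]);
        }
      }
    }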

[10/43] lucene-solr:jira/http2: SOLR-8742: In HdfsDirectoryTest replace RAMDirectory usages with ByteBuffersDirectory.

Posted by da...@apache.org.
SOLR-8742: In HdfsDirectoryTest replace RAMDirectory usages with ByteBuffersDirectory.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2c889229
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2c889229
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2c889229

Branch: refs/heads/jira/http2
Commit: 2c88922998ab3bc2e97d9c640bc01fd39d95fb34
Parents: 6fbcda6
Author: Steve Rowe <sa...@apache.org>
Authored: Fri Sep 7 13:19:01 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Fri Sep 7 13:19:01 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                              | 3 +++
 .../test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java    | 7 +++----
 2 files changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2c889229/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index e5596e1..7d30c0c 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -424,6 +424,9 @@ Other Changes
 
 * SOLR-12744: Improve logging messages and verbosity around recoveries (Cao Manh Dat, Varun Thacker)
 
+* SOLR-8742: In HdfsDirectoryTest replace RAMDirectory usages with ByteBuffersDirectory. 
+  (hossman, Mark Miller, Andrzej Bialecki, Steve Rowe)
+
 ==================  7.4.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

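For reference, a minimal standalone use of the replacement class (the file name
and payload are arbitrary; this only assumes ByteBuffersDirectory is on the
classpath, as it is on this branch):

    import org.apache.lucene.store.ByteBuffersDirectory;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.apache.lucene.store.IndexOutput;

    public class ByteBuffersDirectoryDemo {
      public static void main(String[] args) throws Exception {
        // heap-backed Directory, the drop-in replacement for RAMDirectory
        try (Directory dir = new ByteBuffersDirectory()) {
          try (IndexOutput out = dir.createOutput("test.eof", IOContext.DEFAULT)) {
            out.writeInt(42);
          }
          System.out.println(dir.fileLength("test.eof")); // 4 bytes
          try (IndexInput in = dir.openInput("test.eof", IOContext.DEFAULT)) {
            System.out.println(in.readInt()); // 42
          }
        }
      }
    }
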
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2c889229/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
index 97b26fd..b9bd029 100644
--- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
@@ -24,11 +24,11 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.store.ByteBuffersDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.RAMDirectory;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.cloud.hdfs.HdfsTestUtil;
 import org.apache.solr.util.BadHdfsThreadsFilter;
@@ -140,9 +140,8 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 {
   }
   
   @Test
-  // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
   public void testEOF() throws IOException {
-    Directory fsDir = new RAMDirectory();
+    Directory fsDir = new ByteBuffersDirectory();
     String name = "test.eof";
     createFile(name, fsDir, directory);
     long fsLength = fsDir.fileLength(name);
@@ -168,7 +167,7 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 {
     try {
       Set<String> names = new HashSet<>();
       for (; i< 10; i++) {
-        Directory fsDir = new RAMDirectory();
+        Directory fsDir = new ByteBuffersDirectory();
         String name = getName();
         System.out.println("Working on pass [" + i  +"] contains [" + names.contains(name) + "]");
         names.add(name);