Posted to commits@lucene.apache.org by is...@apache.org on 2017/07/29 22:00:04 UTC

[27/28] lucene-solr:jira/solr-6630: Merging master

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
index 85e73df..6f3ea78 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
@@ -15,9 +15,11 @@
  * limitations under the License.
  */
 package org.apache.lucene.search.highlight;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.Objects;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
@@ -26,38 +28,42 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.util.PriorityQueue;
 
 /**
- * Class used to markup highlighted terms found in the best sections of a
+ * Marks up highlighted terms found in the best sections of
  * text, using configurable {@link Fragmenter}, {@link Scorer}, {@link Formatter},
  * {@link Encoder} and tokenizers.
+ *
+ * This is Lucene's original Highlighter; there are others.
  */
 public class Highlighter
 {
   public static final int DEFAULT_MAX_CHARS_TO_ANALYZE = 50*1024;
 
-  private int maxDocCharsToAnalyze = DEFAULT_MAX_CHARS_TO_ANALYZE;
   private Formatter formatter;
   private Encoder encoder;
-  private Fragmenter textFragmenter=new SimpleFragmenter();
-  private Scorer fragmentScorer=null;
+  private Scorer fragmentScorer;
+  private int maxDocCharsToAnalyze = DEFAULT_MAX_CHARS_TO_ANALYZE;
+  private Fragmenter textFragmenter = new SimpleFragmenter();
 
   public Highlighter(Scorer fragmentScorer)
   {
     this(new SimpleHTMLFormatter(),fragmentScorer);
   }
 
-
-   public Highlighter(Formatter formatter, Scorer fragmentScorer)
-   {
+  public Highlighter(Formatter formatter, Scorer fragmentScorer)
+  {
     this(formatter,new DefaultEncoder(),fragmentScorer);
   }
 
-
   public Highlighter(Formatter formatter, Encoder encoder, Scorer fragmentScorer)
   {
-     this.formatter = formatter;
+    ensureArgumentNotNull(formatter, "'formatter' must not be null");
+    ensureArgumentNotNull(encoder, "'encoder' must not be null");
+    ensureArgumentNotNull(fragmentScorer, "'fragmentScorer' must not be null");
+
+    this.formatter = formatter;
     this.encoder = encoder;
-     this.fragmentScorer = fragmentScorer;
-   }
+    this.fragmentScorer = fragmentScorer;
+  }
 
   /**
    * Highlights chosen terms in a text, extracting the most relevant section.
@@ -191,7 +197,7 @@ public class Highlighter
     if (fragmentScorer instanceof QueryScorer) {
       ((QueryScorer) fragmentScorer).setMaxDocCharsToAnalyze(maxDocCharsToAnalyze);
     }
-    
+
     TokenStream newStream = fragmentScorer.init(tokenStream);
     if(newStream != null) {
       tokenStream = newStream;
@@ -477,7 +483,6 @@ public class Highlighter
     this.maxDocCharsToAnalyze = maxDocCharsToAnalyze;
   }
 
-  
   public Fragmenter getTextFragmenter()
   {
     return textFragmenter;
@@ -485,7 +490,7 @@ public class Highlighter
 
   public void setTextFragmenter(Fragmenter fragmenter)
   {
-    textFragmenter = fragmenter;
+    textFragmenter = Objects.requireNonNull(fragmenter);
   }
 
   /**
@@ -496,34 +501,45 @@ public class Highlighter
     return fragmentScorer;
   }
 
-
   public void setFragmentScorer(Scorer scorer)
   {
-    fragmentScorer = scorer;
+    fragmentScorer = Objects.requireNonNull(scorer);
   }
 
-    public Encoder getEncoder()
-    {
-        return encoder;
-    }
-    public void setEncoder(Encoder encoder)
-    {
-        this.encoder = encoder;
+  public Encoder getEncoder() {
+    return encoder;
+  }
+
+  public void setEncoder(Encoder encoder) {
+    this.encoder = Objects.requireNonNull(encoder);
+  }
+
+  /**
+   * Throws an IllegalArgumentException with the provided message if 'argument' is null.
+   *
+   * @param argument the argument to be null-checked
+   * @param message  the message of the exception thrown if argument == null
+   */
+  private static void ensureArgumentNotNull(Object argument, String message) {
+    if (argument == null) {
+      throw new IllegalArgumentException(message);
     }
-}
-class FragmentQueue extends PriorityQueue<TextFragment>
-{
-  public FragmentQueue(int size)
-  {
-    super(size);
   }
 
-  @Override
-  public final boolean lessThan(TextFragment fragA, TextFragment fragB)
+  static class FragmentQueue extends PriorityQueue<TextFragment>
   {
-    if (fragA.getScore() == fragB.getScore())
-      return fragA.fragNum > fragB.fragNum;
-    else
-      return fragA.getScore() < fragB.getScore();
+    FragmentQueue(int size)
+    {
+      super(size);
+    }
+
+    @Override
+    public final boolean lessThan(TextFragment fragA, TextFragment fragB)
+    {
+      if (fragA.getScore() == fragB.getScore())
+        return fragA.fragNum > fragB.fragNum;
+      else
+        return fragA.getScore() < fragB.getScore();
+    }
   }
 }

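For context on the Highlighter change above: the constructors now validate their arguments, so a null Formatter, Encoder, or Scorer fails fast with an IllegalArgumentException instead of surfacing later as an NPE. Below is a minimal usage sketch; the query, analyzer, field name, and text are illustrative, not part of this commit.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;

public class HighlighterSketch {
  public static void main(String[] args) throws Exception {
    TermQuery query = new TermQuery(new Term("body", "lucene"));
    // After this change, passing a null formatter or scorer throws
    // IllegalArgumentException up front rather than an NPE at highlight time.
    Highlighter highlighter =
        new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(query));
    String text = "Apache Lucene is a search library";
    String best = highlighter.getBestFragment(new StandardAnalyzer(), "body", text);
    System.out.println(best); // Apache <B>Lucene</B> is a search library
  }
}
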
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsCollector.java
index 9c9072d..15ce023 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsCollector.java
@@ -20,7 +20,7 @@ import java.io.IOException;
 
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.LeafCollector;
@@ -37,9 +37,9 @@ final class GlobalOrdinalsCollector implements Collector {
 
   final String field;
   final LongBitSet collectedOrds;
-  final MultiDocValues.OrdinalMap ordinalMap;
+  final OrdinalMap ordinalMap;
 
-  GlobalOrdinalsCollector(String field, MultiDocValues.OrdinalMap ordinalMap, long valueCount) {
+  GlobalOrdinalsCollector(String field, OrdinalMap ordinalMap, long valueCount) {
     this.field = field;
     this.ordinalMap = ordinalMap;
     this.collectedOrds = new LongBitSet(valueCount);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
index aacda2d..9ddc5ee 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
@@ -21,7 +21,7 @@ import java.util.Set;
 
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.ConstantScoreWeight;
@@ -41,7 +41,7 @@ final class GlobalOrdinalsQuery extends Query {
   // All the ords of matching docs found with OrdinalsCollector.
   private final LongBitSet foundOrds;
   private final String joinField;
-  private final MultiDocValues.OrdinalMap globalOrds;
+  private final OrdinalMap globalOrds;
   // Is also an approximation of the docs that will match. Can be all docs that have toField or something more specific.
   private final Query toQuery;
 
@@ -50,7 +50,7 @@ final class GlobalOrdinalsQuery extends Query {
   // id of the context rather than the context itself in order not to hold references to index readers
   private final Object indexReaderContextId;
 
-  GlobalOrdinalsQuery(LongBitSet foundOrds, String joinField, MultiDocValues.OrdinalMap globalOrds, Query toQuery,
+  GlobalOrdinalsQuery(LongBitSet foundOrds, String joinField, OrdinalMap globalOrds, Query toQuery,
                       Query fromQuery, Object indexReaderContextId) {
     this.foundOrds = foundOrds;
     this.joinField = joinField;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreCollector.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreCollector.java
index cc58041..a557416 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreCollector.java
@@ -21,7 +21,7 @@ import java.util.Arrays;
 
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.LeafCollector;
@@ -35,13 +35,13 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {
   final boolean doMinMax;
   final int min;
   final int max;
-  final MultiDocValues.OrdinalMap ordinalMap;
+  final OrdinalMap ordinalMap;
   final LongBitSet collectedOrds;
 
   protected final Scores scores;
   protected final Occurrences occurrences;
 
-  GlobalOrdinalsWithScoreCollector(String field, MultiDocValues.OrdinalMap ordinalMap, long valueCount, ScoreMode scoreMode, int min, int max) {
+  GlobalOrdinalsWithScoreCollector(String field, OrdinalMap ordinalMap, long valueCount, ScoreMode scoreMode, int min, int max) {
     if (valueCount > Integer.MAX_VALUE) {
       // We simply don't support more than
       throw new IllegalStateException("Can't collect more than [" + Integer.MAX_VALUE + "] ids");
@@ -168,7 +168,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {
 
   static final class Min extends GlobalOrdinalsWithScoreCollector {
 
-    public Min(String field, MultiDocValues.OrdinalMap ordinalMap, long valueCount, int min, int max) {
+    public Min(String field, OrdinalMap ordinalMap, long valueCount, int min, int max) {
       super(field, ordinalMap, valueCount, ScoreMode.Min, min, max);
     }
 
@@ -185,7 +185,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {
 
   static final class Max extends GlobalOrdinalsWithScoreCollector {
 
-    public Max(String field, MultiDocValues.OrdinalMap ordinalMap, long valueCount, int min, int max) {
+    public Max(String field, OrdinalMap ordinalMap, long valueCount, int min, int max) {
       super(field, ordinalMap, valueCount, ScoreMode.Max, min, max);
     }
 
@@ -202,7 +202,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {
 
   static final class Sum extends GlobalOrdinalsWithScoreCollector {
 
-    public Sum(String field, MultiDocValues.OrdinalMap ordinalMap, long valueCount, int min, int max) {
+    public Sum(String field, OrdinalMap ordinalMap, long valueCount, int min, int max) {
       super(field, ordinalMap, valueCount, ScoreMode.Total, min, max);
     }
 
@@ -219,7 +219,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {
 
   static final class Avg extends GlobalOrdinalsWithScoreCollector {
 
-    public Avg(String field, MultiDocValues.OrdinalMap ordinalMap, long valueCount, int min, int max) {
+    public Avg(String field, OrdinalMap ordinalMap, long valueCount, int min, int max) {
       super(field, ordinalMap, valueCount, ScoreMode.Avg, min, max);
     }
 
@@ -241,7 +241,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {
 
   static final class NoScore extends GlobalOrdinalsWithScoreCollector {
 
-    public NoScore(String field, MultiDocValues.OrdinalMap ordinalMap, long valueCount, int min, int max) {
+    public NoScore(String field, OrdinalMap ordinalMap, long valueCount, int min, int max) {
       super(field, ordinalMap, valueCount, ScoreMode.None, min, max);
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
index 1c80bf3..7946559 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
@@ -21,7 +21,7 @@ import java.util.Set;
 
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -39,7 +39,7 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
 
   private final GlobalOrdinalsWithScoreCollector collector;
   private final String joinField;
-  private final MultiDocValues.OrdinalMap globalOrds;
+  private final OrdinalMap globalOrds;
   // Is also an approximation of the docs that will match. Can be all docs that have toField or something more specific.
   private final Query toQuery;
 
@@ -52,7 +52,7 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
   private final Object indexReaderContextId;
 
   GlobalOrdinalsWithScoreQuery(GlobalOrdinalsWithScoreCollector collector, ScoreMode scoreMode, String joinField,
-                               MultiDocValues.OrdinalMap globalOrds, Query toQuery, Query fromQuery, int min, int max,
+                               OrdinalMap globalOrds, Query toQuery, Query fromQuery, int min, int max,
                                Object indexReaderContextId) {
     this.collector = collector;
     this.joinField = joinField;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java b/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
index d7e0ae8..c0f380d 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
@@ -34,8 +34,8 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
@@ -407,7 +407,7 @@ public final class JoinUtil {
 
 
   /**
-   * Delegates to {@link #createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, MultiDocValues.OrdinalMap, int, int)},
+   * Delegates to {@link #createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, OrdinalMap, int, int)},
    * but disables the min and max filtering.
    *
    * @param joinField   The {@link SortedDocValues} field containing the join values
@@ -425,7 +425,7 @@ public final class JoinUtil {
                                       Query toQuery,
                                       IndexSearcher searcher,
                                       ScoreMode scoreMode,
-                                      MultiDocValues.OrdinalMap ordinalMap) throws IOException {
+                                      OrdinalMap ordinalMap) throws IOException {
     return createJoinQuery(joinField, fromQuery, toQuery, searcher, scoreMode, ordinalMap, 0, Integer.MAX_VALUE);
   }
 
@@ -464,7 +464,7 @@ public final class JoinUtil {
                                       Query toQuery,
                                       IndexSearcher searcher,
                                       ScoreMode scoreMode,
-                                      MultiDocValues.OrdinalMap ordinalMap,
+                                      OrdinalMap ordinalMap,
                                       int min,
                                       int max) throws IOException {
     int numSegments = searcher.getIndexReader().leaves().size();

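The OrdinalMap change running through the join classes above is a pure relocation: the class moved from being a nested type of MultiDocValues to the top-level org.apache.lucene.index.OrdinalMap, so callers only swap imports; build() keeps the same signature. A hedged sketch of the join-by-global-ordinals flow with the new names (the field name and queries are placeholders):

import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.join.JoinUtil;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.util.packed.PackedInts;

public class GlobalOrdJoinSketch {
  static Query joinOnGlobalOrds(IndexSearcher searcher) throws IOException {
    IndexReader r = searcher.getIndexReader();
    SortedDocValues[] values = new SortedDocValues[r.leaves().size()];
    for (int i = 0; i < r.leaves().size(); i++) {
      values[i] = DocValues.getSorted(r.leaves().get(i).reader(), "join_field");
    }
    // Previously MultiDocValues.OrdinalMap.build(...); only the owning
    // class and the import change.
    OrdinalMap ordinalMap = OrdinalMap.build(null, values, PackedInts.DEFAULT);
    return JoinUtil.createJoinQuery("join_field", new MatchAllDocsQuery(),
        new MatchAllDocsQuery(), searcher, ScoreMode.None, ordinalMap);
  }
}
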
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
index b1a4a02..12fefd5 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
@@ -55,11 +55,10 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiDocValues.OrdinalMap;
-import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SortedDocValues;
@@ -267,7 +266,7 @@ public class TestJoinUtil extends LuceneTestCase {
       LeafReader leafReader =  r.leaves().get(i).reader();
       values[i] = DocValues.getSorted(leafReader, joinField);
     }
-    MultiDocValues.OrdinalMap ordinalMap = MultiDocValues.OrdinalMap.build(
+    OrdinalMap ordinalMap = OrdinalMap.build(
         null, values, PackedInts.DEFAULT
     );
 
@@ -372,7 +371,7 @@ public class TestJoinUtil extends LuceneTestCase {
       LeafReader leafReader =  r.leaves().get(i).reader();
       values[i] = DocValues.getSorted(leafReader, joinField);
     }
-    MultiDocValues.OrdinalMap ordinalMap = MultiDocValues.OrdinalMap.build(
+    OrdinalMap ordinalMap = OrdinalMap.build(
         null, values, PackedInts.DEFAULT
     );
 
@@ -500,7 +499,7 @@ public class TestJoinUtil extends LuceneTestCase {
     for (LeafReaderContext leadContext : searcher.getIndexReader().leaves()) {
       values[leadContext.ord] = DocValues.getSorted(leadContext.reader(), "join_field");
     }
-    MultiDocValues.OrdinalMap ordinalMap = MultiDocValues.OrdinalMap.build(
+    OrdinalMap ordinalMap = OrdinalMap.build(
         null, values, PackedInts.DEFAULT
     );
     BooleanQuery.Builder fromQuery = new BooleanQuery.Builder();
@@ -621,7 +620,7 @@ public class TestJoinUtil extends LuceneTestCase {
     for (LeafReaderContext leadContext : searcher.getIndexReader().leaves()) {
       values[leadContext.ord] = DocValues.getSorted(leadContext.reader(), "join_field");
     }
-    MultiDocValues.OrdinalMap ordinalMap = MultiDocValues.OrdinalMap.build(
+    OrdinalMap ordinalMap = OrdinalMap.build(
         null, values, PackedInts.DEFAULT
     );
     Query fromQuery = new TermQuery(new Term("type", "from"));
@@ -1036,7 +1035,7 @@ public class TestJoinUtil extends LuceneTestCase {
             LeafReader leafReader =  r.leaves().get(i).reader();
             values[i] = DocValues.getSorted(leafReader, joinField);
           }
-          MultiDocValues.OrdinalMap ordinalMap = MultiDocValues.OrdinalMap.build(
+          OrdinalMap ordinalMap = OrdinalMap.build(
               null, values, PackedInts.DEFAULT
           );
           IndexSearcher indexSearcher = new IndexSearcher(r);
@@ -1067,7 +1066,7 @@ public class TestJoinUtil extends LuceneTestCase {
             LeafReader leafReader =  r.leaves().get(i).reader();
             values[i] = DocValues.getSorted(leafReader, joinField);
           }
-          MultiDocValues.OrdinalMap ordinalMap = MultiDocValues.OrdinalMap.build(
+          OrdinalMap ordinalMap = OrdinalMap.build(
               null, values, PackedInts.DEFAULT
           );
           IndexSearcher indexSearcher = new IndexSearcher(r);
@@ -1590,7 +1589,7 @@ public class TestJoinUtil extends LuceneTestCase {
       for (LeafReaderContext leadContext : topLevelReader.leaves()) {
         values[leadContext.ord] = DocValues.getSorted(leadContext.reader(), "join_field");
       }
-      context.ordinalMap = MultiDocValues.OrdinalMap.build(
+      context.ordinalMap = OrdinalMap.build(
           null, values, PackedInts.DEFAULT
       );
     }
@@ -1712,7 +1711,7 @@ public class TestJoinUtil extends LuceneTestCase {
     Map<String, Map<Integer, JoinScore>> fromHitsToJoinScore = new HashMap<>();
     Map<String, Map<Integer, JoinScore>> toHitsToJoinScore = new HashMap<>();
 
-    MultiDocValues.OrdinalMap ordinalMap;
+    OrdinalMap ordinalMap;
 
     Directory dir;
     IndexSearcher searcher;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java
index 3a33380..c036d34 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/serialized/SerializedDVStrategy.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.ConstantScoreScorer;
 import org.apache.lucene.search.ConstantScoreWeight;
@@ -163,7 +164,7 @@ public class SerializedDVStrategy extends SpatialStrategy {
   }//PredicateValueSourceQuery
 
   /**
-   * Implements a ValueSource by deserializing a Shape in from BinaryDocValues using BinaryCodec.
+   * Implements a ShapeValuesSource by deserializing a Shape from BinaryDocValues using BinaryCodec.
    * @see #makeShapeValueSource()
    */
   static class ShapeDocValueSource extends ShapeValuesSource {
@@ -178,7 +179,7 @@ public class SerializedDVStrategy extends SpatialStrategy {
 
     @Override
     public ShapeValues getValues(LeafReaderContext readerContext) throws IOException {
-      final BinaryDocValues docValues = readerContext.reader().getBinaryDocValues(fieldName);
+      final BinaryDocValues docValues = DocValues.getBinary(readerContext.reader(), fieldName);
 
       return new ShapeValues() {
         @Override

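The switch above from reader().getBinaryDocValues(...) to DocValues.getBinary(...) matters because the latter never returns null: for a segment with no values for the field it substitutes an empty iterator (and it throws if the field exists with the wrong doc-values type), so the caller needs no null check. A small sketch of the difference, with a placeholder field name:

import java.io.IOException;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;

class BinaryDvSketch {
  static void show(LeafReader reader) throws IOException {
    // May be null when this segment has no values for the field:
    BinaryDocValues maybeNull = reader.getBinaryDocValues("shape");
    // Never null; a missing field yields an empty iterator, so code like
    // the ShapeValues anonymous class above can use it unconditionally:
    BinaryDocValues nonNull = DocValues.getBinary(reader, "shape");
  }
}
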
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
index 5e91fa7..7f8f649 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
@@ -492,7 +492,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
         if (left != null && left.traverse(edgeIterator, minValue, maxValue) == false) {
           return false;
         }
-        if (right != null && minValue >= low && right.traverse(edgeIterator, minValue, maxValue) == false) {
+        if (right != null && maxValue >= low && right.traverse(edgeIterator, minValue, maxValue) == false) {
           return false;
         }
       }

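The one-character GeoComplexPolygon fix above restores the usual augmented-interval-tree pruning rule: every interval in the right subtree has low >= this node's low, so the right child may be skipped only when maxValue < low; the old test against minValue wrongly skipped subtrees that overlap just the upper part of the query range. A minimal sketch of that invariant follows (a simplified tree, not the Geo3D implementation):

/** Simplified augmented interval-tree node, for illustration only. */
class IntervalNode {
  double low, high;          // this node's interval [low, high]
  double max;                // largest 'high' anywhere in this subtree
  IntervalNode left, right;

  /** Visits every stored interval overlapping [minValue, maxValue]. */
  boolean traverse(double minValue, double maxValue) {
    if (minValue <= max) {   // otherwise nothing in this subtree can overlap
      if (left != null && left.traverse(minValue, maxValue) == false) {
        return false;
      }
      if (minValue <= high && maxValue >= low) {
        // ... visit this node's interval; return false to abort early
      }
      // The right subtree holds intervals with low >= this.low, so it can
      // be pruned only when maxValue < this.low -- hence 'maxValue >= low',
      // not 'minValue >= low' as in the buggy line.
      if (right != null && maxValue >= low && right.traverse(minValue, maxValue) == false) {
        return false;
      }
    }
    return true;
  }
}
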
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 8d0142e..e5abb3b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -59,17 +59,32 @@ New Features
 
 * SOLR-11019: Add addAll Stream Evaluator (Joel Bernstein)
 
+* SOLR-11046: Add residuals Stream Evaluator (Joel Bernstein)
+
+* SOLR-10858: Make UUIDUpdateProcessorFactory a Runtime URP (Amit Sarkar, noble)
+
 Bug Fixes
 ----------------------
 
 * SOLR-10668: fix NPE at sort=childfield(..) .. on absent values (Mikhail Khludnev)
 
+* SOLR-8984: EnumField's error reporting now indicates the field name in the failure log (Lanny Ripple,
+  Ann Addicks via Ishan Chattopadhyaya)
+
+* SOLR-11012: Fix three (JavaBinCodec not being closed) Resource Leak warnings. (Christine Poerschke)
+
+* SOLR-11011: Assign.buildCoreName can lead to error in creating a new core when legacyCloud=false (Cao Manh Dat)
+
+* SOLR-10944: Get expression fails to return EOF tuple (Susheel Kumar, Joel Bernstein)
+
 Optimizations
 ----------------------
 
 * SOLR-10985: Remove unnecessary toString() calls in solr-core's search package's debug logging.
   (Michael Braun via Christine Poerschke)
 
+* SOLR-11124: MoveReplicaCmd should skip deleting the old replica when its node is not live (Cao Manh Dat)
+
 Other Changes
 ----------------------
 
@@ -82,6 +97,25 @@ Other Changes
 
 * SOLR-10964: Reduce SolrIndexSearcher casting in LTRRescorer. (Christine Poerschke)
 
+* SOLR-11075: Refactor handling of params in CloudSolrStream and FacetStream (Erick Erickson)
+
+* SOLR-11052: Remove unnecessary Long-to-Integer and back casts in ReplicationHandler.
+  (Ramsey Haddad via Christine Poerschke)
+
+* SOLR-11106: TestLBHttpSolrClient.testReliablity takes 30 seconds because of the wrong server name
+  (Kensho Hirasawa via Erick Erickson)
+  
+* SOLR-11122: Creating a core should write a core.properties file first and clean up on failure
+  (Erick Erickson)
+
+* SOLR-10338: Configure SecureRandom non blocking for tests. (Mihaly Toth, hossman, Ishan Chattopadhyaya, via Mark Miller)
+
+* SOLR-10916: Convert tests that extend LuceneTestCase and use MiniSolrCloudCluster 
+  to instead extend SolrCloudTestCase. (Steve Rowe)
+
+* SOLR-11131: Document 'assert' as a command option in bin/solr, and bin/solr.cmd scripts.
+  (Jason Gerlowski via Anshum Gupta)
+
 ==================  7.0.0 ==================
 
 Versions of Major Components
@@ -96,6 +130,11 @@ Jetty 9.3.14.v20161028
 Upgrading from Solr 6.x
 ----------------------
 
+* The default response type is now JSON ("wt=json") instead of XML, and line indentation is now on by default
+  ("indent=on"). If you expect the responses to your queries to be returned in the previous format (XML
+  format, no indentation), you must now explicitly pass in "wt=xml" and "indent=off" as query
+  parameters, or configure them as defaults on your request handlers.  See SOLR-10494 for more details.
+
 * the cluster property 'legacyCloud' is set to false from 7.0. This means 'zookeeper is the truth' by
   default. If an entry for a replica does not exist in the state.json, that replica cannot get
   registered. This may affect users who use that feature where they bring up replicas and they are
@@ -206,6 +245,10 @@ Upgrading from Solr 6.x
 
 * StandardRequestHandler is deprecated. Simply use SearchHandler instead.
 
+* The parameter names 'fromNode' for MOVEREPLICA and 'source', 'target' for REPLACENODE have been deprecated and
+  replaced with 'sourceNode' and 'targetNode' instead. The old names will continue to work for back-compatibility
+  but they will be removed in 8.0. See SOLR-11068 for more details.
+
 New Features
 ----------------------
 * SOLR-9857, SOLR-9858: Collect aggregated metrics from nodes and shard leaders in overseer. (ab)
@@ -275,7 +318,9 @@ New Features
   multivalued fields, a new JSON request language, and more. DocValues are now required for any field used in the analytics 
   expression  whereas previously docValues was not required. Please see SOLR-10123 for details. (Houston Putman)
 
-* SOLR-10282: bin/solr support for enabling Kerberos authentication (Ishan Chattopadhyaya)
+* SOLR-10282: bin/solr support for enabling Kerberos authentication (Ishan Chattopadhyaya, Jason Gerlowski)
+
+* SOLR-11093: Add support for PointFields for {!graph} query. (yonik)
 
 Bug Fixes
 ----------------------
@@ -328,6 +373,33 @@ Bug Fixes
 
 * SOLR-10826: Fix CloudSolrClient to expand the collection parameter correctly (Tim Owen via Varun Thacker)
 
+* SOLR-11039: Next button in Solr admin UI for collection list pagination does not work. (janhoy)
+
+* SOLR-11041: MoveReplicaCmd does not specify ulog dir in case of HDFS (Cao Manh Dat)
+
+* SOLR-11045: The new replica created by MoveReplica must have the same name and coreName as the
+  old one in case of HDFS (Cao Manh Dat)
+
+* SOLR-11043: Fix facet.range.method=dv and interval facets on single-valued float fields with negative values.
+  (Tomás Fernández Löbbe, Steve Rowe)
+
+* SOLR-11073: Fix overflow in interval faceting when querying Long limits (e.g. (Long.MAX_VALUE TO Long.MAX_VALUE])
+  (Tomás Fernández Löbbe)
+
+* SOLR-11057: Fix overflow in point range queries when querying the type limits 
+  (e.g. q=field_i:{Integer.MAX_VALUE TO Integer.MAX_VALUE]) (Tomás Fernández Löbbe)
+
+* SOLR-11136: Fix solrj XMLResponseParser when nested docs transformer is used with indented XML (hossman)
+
+* SOLR-11130: V2Request in SolrJ should return the correct collection name so that the request is forwarded to the
+  correct node (noble)
+  
+* SOLR-11151: SolrInfoMBeanHandler.getDiff() ADD case non-functional: NPE when a bean value goes from null -> non-null.
+  (Steve Rowe)
+
+* SOLR-11154: Child documents' return fields now include useDocValuesAsStored fields (Mohammed Sheeri Shaketi Nauage via
+  Ishan Chattopadhyaya)
+
 Optimizations
 ----------------------
 
@@ -348,6 +420,9 @@ Optimizations
 * SOLR-10727: Avoid polluting the filter cache for certain types of faceting (typically ranges) when
   the base docset is empty. (David Smiley)
 
+* SOLR-11070: Make docValues range queries behave the same as Trie/Point fields for Double/Float Infinity cases
+  (Tomás Fernández Löbbe, Andrey Kudryavtsev)
+
 Other Changes
 ----------------------
 * SOLR-10236: Removed FieldType.getNumericType(). Use getNumberType() instead. (Tomás Fernández Löbbe)
@@ -453,15 +528,9 @@ Other Changes
 
 * SOLR-10823: Add reporting period to SolrMetricReporter base class. (Christine Poerschke)
 
-* SOLR-10807: Randomize Points based numeric field types in (more) test schemas
-  - SOLR-10946: Randomize the usage of Points based numerics in solrj test schemas (hossman)
-  - SOLR-10947: Randomize the usage of Points based numerics in contrib test schemas (hossman)
-  - SOLR-10970: Randomize PointFields in all tests using schema-*sort* files (hossman)
-  - SOLR-10971: Randomize PointFields in CdcrBootstrapTest (hossman)
-  - SOLR-10977: Randomize the usage of Points based numerics in schema15.xml and all impacted tests (hossman)
-  - SOLR-10979: Randomize PointFields in schema-docValues*.xml and all affected tests (hossman)
-  - SOLR-10989: Randomize PointFields and general cleanup in schema files where some Trie fields were unused (hossman)
-
+* SOLR-10807: Randomize Points based numeric field types in all existing tests & test schemas
+  where Trie numerics were previously hardcoded. (hossman, Steve Rowe, Anshum Gupta)
+  
 * SOLR-6807: Changed requestDispatcher's handleSelect to default to false, thus ignoring "qt".
   Simplified configs to not refer to handleSelect or "qt".  Switch all tests that assumed true to assume false
   (added leading '/' in request handlers). Switch all tests referring to "standard" request handler to
@@ -480,6 +549,39 @@ Other Changes
 * SOLR-11021: The elevate.xml config-file is made optional in the ElevationComponent.
   The default configset doesn't ship with a elevate.xml file anymore (Varun Thacker)
 
+* SOLR-10898: Fix SOLR-10898 to not deterministically fail 1/512 runs (hossman)
+
+* SOLR-10796: TestPointFields: increase randomized testing of non-trivial values.  (Steve Rowe)
+
+* SOLR-11068: MOVEREPLICA and REPLACENODE API parameter names are now 'sourceNode' and 'targetNode'. The old names
+  viz. 'fromNode' for MOVEREPLICA and 'source', 'target' for REPLACENODE have been deprecated. (shalin)
+
+* SOLR-11088: Fix sporadic failures of MetricsHandlerTest.testPropertyFilter on jenkins (shalin)
+
+* SOLR-11037: Refactor to provide NodeConfig.getSolrDataHome internal API. (ab, janhoy, shalin)
+
+* SOLR-11119: Switch from Trie to Points field types in the .system collection schema. (Steve Rowe)
+
+* SOLR-10494: Make default response format JSON (wt=json), and also indent text responses formats
+  (indent=on) by default (Trey Grainger & Cassandra Targett via hossman)
+
+* SOLR-10760: Remove trie field types and fields from example schemas. (Steve Rowe)
+
+* SOLR-11056: Add random range query test that compares results across Trie*, *Point and DocValue-only fields
+  (Tomás Fernández Löbbe)
+
+* SOLR-10926: Increase the odds of randomly choosing point fields in our SolrTestCaseJ4 numeric type randomization.
+  (hossman, Steve Rowe)
+  
+* SOLR-10846: ExternalFileField/FloatFieldSource should throw a clear exception on initialization with
+  a Points-based keyField, which is not supported. (hossman, Steve Rowe)
+  
+* SOLR-11155: /analysis/field and /analysis/document requests should support points fields.
+  (Jason Gerlowski, Steve Rowe)
+  
+* SOLR-10756: Undeprecate ZkStateReader.updateClusterState(), mark as @lucene.internal, and rename to
+  forciblyRefreshAllClusterStateSlow(). (hossman, shalin, Steve Rowe)
+
 ==================  6.7.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
@@ -612,6 +714,10 @@ when using one of Exact*StatsCache (Mikhail Khludnev)
 
 * SOLR-10914: RecoveryStrategy's sendPrepRecoveryCmd can get stuck for 5 minutes if leader is unloaded. (shalin)
 
+* SOLR-11024: ParallelStream should set the StreamContext when constructing SolrStreams (Joel Bernstein)
+
+* SOLR-10908: CloudSolrStream.toExpression incorrectly handles fq clauses (Rohit Singh via Erick Erickson)
+
 Optimizations
 ----------------------
 * SOLR-10634: JSON Facet API: When a field/terms facet will retrieve all buckets (i.e. limit:-1)
@@ -2256,6 +2362,7 @@ Bug Fixes
 * SOLR-9391: LBHttpSolrClient.request now correctly returns Rsp.server when
   previously skipped servers were successfully tried. (Christine Poerschke)
 
+
 Optimizations
 ----------------------
 

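Regarding the SOLR-10494 upgrade note above (JSON and indentation are now the defaults): clients that want the pre-7.0 behavior can pass the old values per request, or configure them as request-handler defaults. A hedged sketch using plain HTTP from Java; the host, port, and collection name are illustrative:

import java.io.InputStream;
import java.net.URL;
import java.util.Scanner;

public class PreSolr7Defaults {
  public static void main(String[] args) throws Exception {
    // wt=xml and indent=off restore the pre-7.0 response format per request;
    // alternatively set them as defaults on the handler in solrconfig.xml.
    URL url = new URL(
        "http://localhost:8983/solr/techproducts/select?q=*:*&wt=xml&indent=off");
    try (InputStream in = url.openStream();
         Scanner s = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
      System.out.println(s.hasNext() ? s.next() : "");
    }
  }
}
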
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/bin/solr
----------------------------------------------------------------------
diff --git a/solr/bin/solr b/solr/bin/solr
index 3d28340..eeb6da4 100755
--- a/solr/bin/solr
+++ b/solr/bin/solr
@@ -295,7 +295,7 @@ function print_usage() {
   if [ -z "$CMD" ]; then
     echo ""
     echo "Usage: solr COMMAND OPTIONS"
-    echo "       where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk, auth"
+    echo "       where COMMAND is one of: start, stop, restart, status, healthcheck, create, create_core, create_collection, delete, version, zk, auth, assert"
     echo ""
     echo "  Standalone server example (start Solr running in the background on port 8984):"
     echo ""
@@ -555,7 +555,7 @@ function print_usage() {
     echo ""
     echo "Usage: solr auth enable [-type basicAuth] -credentials user:pass [-blockUnknown <true|false>] [-updateIncludeFileOnly <true|false>]"
     echo "       solr auth enable [-type basicAuth] -prompt <true|false> [-blockUnknown <true|false>] [-updateIncludeFileOnly <true|false>]"
-    echo "       solr auth enable -type kerberos -config "<kerberos configs>" [-updateIncludeFileOnly <true|false>]"
+    echo "       solr auth enable -type kerberos -config \"<kerberos configs>\" [-updateIncludeFileOnly <true|false>]"
     echo "       solr auth disable [-updateIncludeFileOnly <true|false>]"
     echo ""
     echo "  -type <type>                           The authentication mechanism (basicAuth or kerberos) to enable. Defaults to 'basicAuth'."
@@ -563,7 +563,7 @@ function print_usage() {
     echo "  -credentials <user:pass>               The username and password of the initial user. Applicable for basicAuth only."
     echo "                                         Note: only one of -prompt or -credentials must be provided"
     echo ""
-    echo "  -config "<configs>"                    Configuration parameters (Solr startup parameters). Required and applicable only for Kerberos"
+    echo "  -config \"<configs>\"                    Configuration parameters (Solr startup parameters). Required and applicable only for Kerberos"
     echo ""
     echo "  -prompt <true|false>                   Prompts the user to provide the credentials. Applicable for basicAuth only."
     echo "                                         Note: only one of -prompt or -credentials must be provided"
@@ -978,7 +978,7 @@ if [[ "$SCRIPT_CMD" == "create" || "$SCRIPT_CMD" == "create_core" || "$SCRIPT_CM
     exit 1
   fi
 
-  if [ "$CREATE_CONFDIR" == "_default" ]; then
+  if [[ "$CREATE_CONFDIR" == "_default" ]] && ([[ "$CREATE_CONFNAME" == "" ]] || [[ "$CREATE_CONFNAME" == "_default" ]]); then
     echo "WARNING: Using _default configset. Data driven schema functionality is enabled by default, which is"
     echo "         NOT RECOMMENDED for production use."
     echo

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/bin/solr.cmd
----------------------------------------------------------------------
diff --git a/solr/bin/solr.cmd b/solr/bin/solr.cmd
index cfc21b9..9a2ca9b 100644
--- a/solr/bin/solr.cmd
+++ b/solr/bin/solr.cmd
@@ -268,7 +268,7 @@ goto done
 :script_usage
 @echo.
 @echo Usage: solr COMMAND OPTIONS
-@echo        where COMMAND is one of: start, stop, restart, healthcheck, create, create_core, create_collection, delete, version, zk, auth
+@echo        where COMMAND is one of: start, stop, restart, healthcheck, create, create_core, create_collection, delete, version, zk, auth, assert
 @echo.
 @echo   Standalone server example (start Solr running in the background on port 8984):
 @echo.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
----------------------------------------------------------------------
diff --git a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
index 79cb6b3..f723a25 100644
--- a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
+++ b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
@@ -271,7 +271,7 @@ public class ICUCollationField extends FieldType {
     BytesRef low = part1 == null ? null : getCollationKey(f, part1);
     BytesRef high = part2 == null ? null : getCollationKey(f, part2);
     if (!field.indexed() && field.hasDocValues()) {
-      return SortedSetDocValuesField.newRangeQuery(
+      return SortedSetDocValuesField.newSlowRangeQuery(
           field.getName(), low, high, minInclusive, maxInclusive);
     } else {
       return new TermRangeQuery(field.getName(), low, high, minInclusive, maxInclusive);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
index 086d7be..103d0dc 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
@@ -160,9 +160,9 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
     int totalDocsNum = parentsNum + childrenNum + grandChildrenNum;
     
     String resp = runFullImport(THREE_LEVEL_HIERARCHY_CONFIG);
-    String xpath = "//arr[@name='documents']/lst/arr[@name='id' and .='"+parentId1+"']/../"+
-      "arr[@name='_childDocuments_']/lst/arr[@name='id' and .='"+childId+"']/../"+
-      "arr[@name='_childDocuments_']/lst/arr[@name='id' and .='"+grandChildrenIds.get(0)+"']";
+    String xpath = "//arr[@name='documents']/lst[arr[@name='id']/str='"+parentId1+"']/"+
+      "arr[@name='_childDocuments_']/lst[arr[@name='id']/str='"+childId+"']/"+
+      "arr[@name='_childDocuments_']/lst[arr[@name='id']/str='"+grandChildrenIds.get(0)+"']";
     String results = TestHarness.validateXPath(resp, 
            xpath);
     assertTrue("Debug documents does not contain child documents\n"+resp+"\n"+ xpath+

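The rewritten XPath above replaces parent-axis navigation (matching the id array and climbing back up with /..) with predicate form, so each lst element is selected directly by the id it contains and the child-document arrays are reached by plain descent. A standalone sketch of evaluating such an expression; the toy response and ids are illustrative, not from the test:

import java.io.StringReader;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;

public class NestedDocXPathSketch {
  public static void main(String[] args) throws Exception {
    String resp = "<response><arr name='documents'><lst>"
        + "<arr name='id'><str>parent1</str></arr>"
        + "<arr name='_childDocuments_'><lst>"
        + "<arr name='id'><str>child1</str></arr>"
        + "</lst></arr></lst></arr></response>";
    Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
        .parse(new InputSource(new StringReader(resp)));
    // Predicate form: select the parent lst whose id matches, then descend.
    String xpath = "//arr[@name='documents']/lst[arr[@name='id']/str='parent1']/"
        + "arr[@name='_childDocuments_']/lst[arr[@name='id']/str='child1']";
    boolean found = (Boolean) XPathFactory.newInstance().newXPath()
        .evaluate(xpath, doc, XPathConstants.BOOLEAN);
    System.out.println(found); // true
  }
}
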
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/contrib/ltr/README.md
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/README.md b/solr/contrib/ltr/README.md
index cc28dcc..6b56cdf 100644
--- a/solr/contrib/ltr/README.md
+++ b/solr/contrib/ltr/README.md
@@ -8,7 +8,7 @@ deploy that model to Solr and use it to rerank your top X search results.
 # Getting Started With Solr Learning To Rank
 
 For information on how to get started with solr ltr please see:
- * [Solr Reference Guide's section on Learning To Rank](https://cwiki.apache.org/confluence/display/solr/Learning+To+Rank)
+ * [Solr Reference Guide's section on Learning To Rank](https://lucene.apache.org/solr/guide/learning-to-rank.html)
 
 # Getting Started With Solr
 
@@ -21,4 +21,3 @@ For information on how to get started with solr please see:
 For information on how to contribute see:
  * http://wiki.apache.org/lucene-java/HowToContribute
  * http://wiki.apache.org/solr/HowToContribute
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/contrib/ltr/example/README.md
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/example/README.md b/solr/contrib/ltr/example/README.md
index 054c3ab..06a4789 100644
--- a/solr/contrib/ltr/example/README.md
+++ b/solr/contrib/ltr/example/README.md
@@ -1,6 +1,6 @@
 This README file is only about this example directory's content.
 
-Please refer to the Solr Reference Guide's section on [Learning To Rank](https://cwiki.apache.org/confluence/display/solr/Learning+To+Rank) section for broader information on Learning to Rank (LTR) with Apache Solr.
+Please refer to the Solr Reference Guide's section on [Learning To Rank](https://lucene.apache.org/solr/guide/learning-to-rank.html) for broader information on Learning to Rank (LTR) with Apache Solr.
 
 # Start Solr with the LTR plugin enabled
 
@@ -29,7 +29,7 @@ Please refer to the Solr Reference Guide's section on [Learning To Rank](https:/
 4. Search and rerank the results using the trained model
 
 ```
-http://localhost:8983/solr/techproducts/query?indent=on&q=test&wt=json&rq={!ltr%20model=exampleModel%20reRankDocs=25%20efi.user_query=%27test%27}&fl=price,score,name
+http://localhost:8983/solr/techproducts/query?q=test&rq={!ltr%20model=exampleModel%20reRankDocs=25%20efi.user_query=%27test%27}&fl=price,score,name
 ```
 
 # Assemble training data
@@ -101,8 +101,8 @@ hard drive|6H500F0        |0|CLICK_LOGS
 hard drive|F8V7067-APL-KIT|0|CLICK_LOGS
 ```
 
-This is a really trival way to generate a training dataset, and in many settings 
-it might not produce great results. Indeed, it is a well known fact that 
+This is a really trivial way to generate a training dataset, and in many settings
+it might not produce great results. Indeed, it is a well-known fact that
 clicks are *biased*: users tend to click on the first
 result proposed for a query, even if it is not relevant. A click on a document in position
 five could be considered more important than a click on a document in position one, because
@@ -128,5 +128,3 @@ Usually a human worker visualizes a query together with a list of results and th
 consists in assigning a relevance label to each document (e.g., Perfect, Excellent, Good, Fair, Not relevant).
 Training data can then be obtained by translating the labels into numeric scores
 (e.g., Perfect = 4, Excellent = 3, Good = 2, Fair = 1, Not relevant = 0).
-
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
index 75d3538..081245f 100644
--- a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
+++ b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
@@ -48,7 +48,6 @@ import org.apache.solr.ltr.model.LTRScoringModel;
 import org.apache.solr.ltr.model.TestLinearModel;
 import org.apache.solr.ltr.norm.IdentityNormalizer;
 import org.apache.solr.ltr.norm.Normalizer;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,7 +59,12 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
   private static final SolrResourceLoader solrResourceLoader = new SolrResourceLoader();
 
   private IndexSearcher getSearcher(IndexReader r) {
-    final IndexSearcher searcher = newSearcher(r);
+    // 'yes' to maybe wrapping in general
+    final boolean maybeWrap = true;
+    final boolean wrapWithAssertions = false;
+    // 'no' to asserting wrap because lucene AssertingWeight
+    // cannot be cast to solr LTRScoringQuery$ModelWeight
+    final IndexSearcher searcher = newSearcher(r, maybeWrap, wrapWithAssertions);
 
     return searcher;
   }
@@ -102,7 +106,6 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
 
   }
 
-  @Ignore
   @Test
   public void testRescorer() throws IOException {
     final Directory dir = newDirectory();
@@ -112,7 +115,7 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
     doc.add(newStringField("id", "0", Field.Store.YES));
     doc.add(newTextField("field", "wizard the the the the the oz",
         Field.Store.NO));
-    doc.add(new FloatDocValuesField("final-score", 1.0f));
+    doc.add(newStringField("final-score", "F", Field.Store.YES)); // TODO: change to numeric field
 
     w.addDocument(doc);
     doc = new Document();
@@ -120,7 +123,7 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
     // 1 extra token, but wizard and oz are close;
     doc.add(newTextField("field", "wizard oz the the the the the the",
         Field.Store.NO));
-    doc.add(new FloatDocValuesField("final-score", 2.0f));
+    doc.add(newStringField("final-score", "T", Field.Store.YES)); // TODO: change to numeric field
     w.addDocument(doc);
 
     final IndexReader r = w.getReader();
@@ -145,7 +148,7 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
     final List<Feature> allFeatures = makeFieldValueFeatures(new int[] {0, 1,
         2, 3, 4, 5, 6, 7, 8, 9}, "final-score");
     final LTRScoringModel ltrScoringModel = TestLinearModel.createLinearModel("test",
-        features, norms, "test", allFeatures, null);
+        features, norms, "test", allFeatures, TestLinearModel.makeFeatureWeights(features));
 
     final LTRRescorer rescorer = new LTRRescorer(new LTRScoringQuery(ltrScoringModel));
     hits = rescorer.rescore(searcher, hits, 2);
@@ -159,7 +162,7 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
 
   }
 
-  @Ignore
+  @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-11134")
   @Test
   public void testDifferentTopN() throws IOException {
     final Directory dir = newDirectory();
@@ -221,7 +224,7 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
     final List<Feature> allFeatures = makeFieldValueFeatures(new int[] {0, 1,
         2, 3, 4, 5, 6, 7, 8, 9}, "final-score");
     final LTRScoringModel ltrScoringModel = TestLinearModel.createLinearModel("test",
-        features, norms, "test", allFeatures, null);
+        features, norms, "test", allFeatures, TestLinearModel.makeFeatureWeights(features));
 
     final LTRRescorer rescorer = new LTRRescorer(new LTRScoringQuery(ltrScoringModel));
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
index 49d50e9..ab81f76 100644
--- a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
+++ b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
@@ -82,7 +82,7 @@ public class V2HttpCall extends HttpSolrCall {
         api = new Api(null) {
           @Override
           public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
-            rsp.add("documentation", "https://cwiki.apache.org/confluence/display/solr/v2+API");
+            rsp.add("documentation", "https://lucene.apache.org/solr/guide/v2-api.html");
             rsp.add("description", "V2 API root path");
           }
         };

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
index c42d073..61dc8db 100644
--- a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
@@ -73,6 +73,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     String node = message.getStr(CoreAdminParams.NODE);
     String shard = message.getStr(SHARD_ID_PROP);
     String coreName = message.getStr(CoreAdminParams.NAME);
+    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
     Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
     boolean parallel = message.getBool("parallel", false);
     if (StringUtils.isBlank(coreName)) {
@@ -103,7 +104,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
     }
     if (coreName == null) {
-      coreName = Assign.buildCoreName(coll, shard, replicaType);
+      coreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), coll, shard, replicaType);
     } else if (!skipCreateReplicaInClusterState) {
       //Validate that the core name is unique in that collection
       for (Slice slice : coll.getSlices()) {
@@ -130,6 +131,9 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
             ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node),
             ZkStateReader.NODE_NAME_PROP, node,
             ZkStateReader.REPLICA_TYPE, replicaType.name());
+        if (coreNodeName != null) {
+          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
+        }
         Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
       }
       params.set(CoreAdminParams.CORE_NODE_NAME,
@@ -139,6 +143,7 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     String configName = zkStateReader.readConfigName(collection);
     String routeKey = message.getStr(ShardParams._ROUTE_);
     String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
+    String ulogDir = message.getStr(CoreAdminParams.ULOG_DIR);
     String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
 
     params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
@@ -161,6 +166,9 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     if (dataDir != null) {
       params.set(CoreAdminParams.DATA_DIR, dataDir);
     }
+    if (ulogDir != null) {
+      params.set(CoreAdminParams.ULOG_DIR, ulogDir);
+    }
     if (instanceDir != null) {
       params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Assign.java b/solr/core/src/java/org/apache/solr/cloud/Assign.java
index 7c9752d..98dcfde 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Assign.java
@@ -28,8 +28,6 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.function.Supplier;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
 import com.google.common.collect.ImmutableMap;
@@ -46,12 +44,16 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CoreContainer;
+import org.apache.solr.util.NumberUtils;
+import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.data.Stat;
 
 import static java.util.Collections.singletonMap;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
@@ -66,25 +68,61 @@ import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_P
 
 
 public class Assign {
-  private static Pattern COUNT = Pattern.compile("core_node(\\d+)");
 
-  public static String assignNode(DocCollection collection) {
-    Map<String, Slice> sliceMap = collection != null ? collection.getSlicesMap() : null;
-    if (sliceMap == null) {
-      return "core_node1";
+  public static int incAndGetId(SolrZkClient zkClient, String collection, int defaultValue) {
+    String path = "/collections/"+collection;
+    try {
+      if (!zkClient.exists(path, true)) {
+        try {
+          zkClient.makePath(path, true);
+        } catch (KeeperException.NodeExistsException e) {
+          // it's okay if another beats us creating the node
+        }
+      }
+      path += "/counter";
+      if (!zkClient.exists(path, true)) {
+        try {
+          zkClient.create(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT, true);
+        } catch (KeeperException.NodeExistsException e) {
+          // it's okay if another beats us creating the node
+        }
+      }
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in ZooKeeper for collection: " + collection, e);
+    } catch (KeeperException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in ZooKeeper for collection: " + collection, e);
     }
 
-    int max = 0;
-    for (Slice slice : sliceMap.values()) {
-      for (Replica replica : slice.getReplicas()) {
-        Matcher m = COUNT.matcher(replica.getName());
-        if (m.matches()) {
-          max = Math.max(max, Integer.parseInt(m.group(1)));
+    while (true) {
+      Stat stat = new Stat();
+      try {
+        byte[] data = zkClient.getData(path, null, stat, true);
+        int currentId = NumberUtils.bytesToInt(data);
+        data = NumberUtils.intToBytes(++currentId);
+        zkClient.setData(path, data, stat.getVersion(), true);
+        return currentId;
+      } catch (KeeperException e) {
+        if (e.code() != KeeperException.Code.BADVERSION) {
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error incrementing and getting counter from ZooKeeper for collection: " + collection, e);
         }
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error incrementing and getting counter from ZooKeeper for collection: " + collection, e);
       }
     }
+  }
 
-    return "core_node" + (max + 1);
+  public static String assignNode(SolrZkClient client, DocCollection collection) {
+    // for backward compatibility, seed the counter past any replica ids that may already exist
+    int numReplicas = collection.getReplicas().size();
+    String coreNodeName = "core_node" + incAndGetId(client, collection.getName(), numReplicas * 20);
+    while (collection.getReplica(coreNodeName) != null) {
+      // there is a small chance that the new coreNodeName is not totally unique,
+      // but it is guaranteed unique for new collections
+      coreNodeName = "core_node" + incAndGetId(client, collection.getName(), numReplicas * 20);
+    }
+    return coreNodeName;
   }
 
   /**
@@ -131,27 +169,33 @@ public class Assign {
     return returnShardId;
   }
 
-  public static String buildCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
+  private static String buildCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
     // TODO: Adding the suffix is great for debugging, but may be an issue if at some point we want to support a way to change replica type
     return String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, shard, type.name().substring(0,1).toLowerCase(Locale.ROOT), replicaNum);
   }
 
-  public static String buildCoreName(DocCollection collection, String shard, Replica.Type type) {
+  public static String buildCoreName(SolrZkClient zkClient, DocCollection collection, String shard, Replica.Type type) {
     Slice slice = collection.getSlice(shard);
-    int replicaNum = slice.getReplicas().size();
-    for (; ; ) {
-      String replicaName = buildCoreName(collection.getName(), shard, type, replicaNum);
-      boolean exists = false;
-      for (Replica replica : slice.getReplicas()) {
-        if (replicaName.equals(replica.getStr(CORE_NAME_PROP))) {
-          exists = true;
-          break;
-        }
+    int numReplicas = collection.getReplicas().size();
+    int replicaNum = incAndGetId(zkClient, collection.getName(), numReplicas * 20);
+    String coreName = buildCoreName(collection.getName(), shard, type, replicaNum);
+    while (coreNameExists(coreName, slice)) {
+      replicaNum = incAndGetId(zkClient, collection.getName(), numReplicas * 20);
+      coreName = buildCoreName(collection.getName(), shard, type, replicaNum);
+    }
+    return coreName;
+  }
+
+  private static boolean coreNameExists(String coreName, Slice slice) {
+    if (slice == null) return false;
+    for (Replica replica : slice.getReplicas()) {
+      if (coreName.equals(replica.getStr(CORE_NAME_PROP))) {
+        return true;
       }
-      if (exists) replicaNum++;
-      else return replicaName;
     }
+    return false;
   }
+
   public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
     // TODO: add smarter options that look at the current number of cores per
     // node?

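
The counter introduced in Assign.incAndGetId above is ZooKeeper's standard optimistic-concurrency (compare-and-set) idiom: read the znode together with its Stat, write the incremented value back conditioned on the version from that read, and retry whenever a concurrent writer wins the race and the write fails with BADVERSION. A minimal sketch of the same idiom against the raw ZooKeeper client, for reference; the class and method names here are illustrative, not part of this patch:

import java.nio.ByteBuffer;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZkCounterSketch {
  private final ZooKeeper zk;
  private final String path; // e.g. "/collections/<name>/counter"

  public ZkCounterSketch(ZooKeeper zk, String path) {
    this.zk = zk;
    this.path = path;
  }

  /** Atomically increments the counter; safe against concurrent writers. */
  public int incrementAndGet() throws KeeperException, InterruptedException {
    while (true) {
      Stat stat = new Stat();
      byte[] data = zk.getData(path, false, stat); // read value and version together
      int next = ByteBuffer.wrap(data).getInt() + 1;
      try {
        // the write succeeds only if the znode is still at the version we read
        zk.setData(path, ByteBuffer.allocate(Integer.BYTES).putInt(next).array(), stat.getVersion());
        return next;
      } catch (KeeperException.BadVersionException e) {
        // another writer got in first; loop and re-read the new version
      }
    }
  }
}

Note that the counter only guarantees that two concurrent callers never receive the same id, not that the id is unused by replicas created before the counter existed, which is why assignNode and buildCoreName still check each returned name against the collection and draw again on a clash.
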
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index ac09621..c05072d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -31,6 +31,7 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.SolrResourceLoader;
@@ -64,10 +65,11 @@ public class CloudUtil {
           
           String cnn = replica.getName();
           String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+          boolean isSharedFs = replica.getStr(CoreAdminParams.DATA_DIR) != null;
           log.debug("compare against coreNodeName={} baseUrl={}", cnn, baseUrl);
           
           if (thisCnn != null && thisCnn.equals(cnn)
-              && !thisBaseUrl.equals(baseUrl)) {
+              && !thisBaseUrl.equals(baseUrl) && isSharedFs) {
             if (cc.getLoadedCoreNames().contains(desc.getName())) {
               cc.unload(desc.getName());
             }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
index d685dd0..9368fe1 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
@@ -211,7 +211,8 @@ public class CreateCollectionCmd implements Cmd {
       Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
       for (ReplicaPosition replicaPosition : replicaPositions) {
         String nodeName = replicaPosition.node;
-        String coreName = Assign.buildCoreName(collectionName, replicaPosition.shard, replicaPosition.type, replicaPosition.index + 1);
+        String coreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), zkStateReader.getClusterState().getCollection(collectionName),
+            replicaPosition.shard, replicaPosition.type);
         log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
             , coreName, replicaPosition.shard, collectionName, nodeName));
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
index f96dd0c..c3344eb 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
@@ -88,23 +88,19 @@ public class CreateShardCmd implements Cmd {
     int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
     CountDownLatch countDownLatch = new CountDownLatch(totalReplicas);
     for (int j = 1; j <= totalReplicas; j++) {
-      int coreNameNumber;
       Replica.Type typeToCreate;
       if (createdNrtReplicas < numNrtReplicas) {
         createdNrtReplicas++;
-        coreNameNumber = createdNrtReplicas;
         typeToCreate = Replica.Type.NRT;
       } else if (createdTlogReplicas < numTlogReplicas) {
         createdTlogReplicas++;
-        coreNameNumber = createdTlogReplicas;
         typeToCreate = Replica.Type.TLOG;
       } else {
         createdPullReplicas++;
-        coreNameNumber = createdPullReplicas;
         typeToCreate = Replica.Type.PULL;
       }
       String nodeName = sortedNodeList.get(((j - 1)) % sortedNodeList.size()).nodeName;
-      String coreName = Assign.buildCoreName(collectionName, sliceName, typeToCreate, coreNameNumber);
+      String coreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), collection, sliceName, typeToCreate);
 //      String coreName = collectionName + "_" + sliceName + "_replica" + j;
       log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
           + " on " + nodeName);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java b/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
index 0ea5d6e..f70c836 100644
--- a/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
@@ -224,7 +224,7 @@ public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {
     Slice tempSourceSlice = clusterState.getCollection(tempSourceCollectionName).getSlices().iterator().next();
     Replica tempSourceLeader = zkStateReader.getLeaderRetry(tempSourceCollectionName, tempSourceSlice.getName(), 120000);
 
-    String tempCollectionReplica1 = Assign.buildCoreName(tempSourceCollectionName, tempSourceSlice.getName(), Replica.Type.NRT, 1);
+    String tempCollectionReplica1 = tempSourceLeader.getCoreName();
     String coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
         sourceLeader.getNodeName(), tempCollectionReplica1);
     // wait for the replicas to be seen as active on temp source leader
@@ -257,7 +257,8 @@ public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {
 
     log.info("Creating a replica of temporary collection: {} on the target leader node: {}",
         tempSourceCollectionName, targetLeader.getNodeName());
-    String tempCollectionReplica2 = Assign.buildCoreName(tempSourceCollectionName, tempSourceSlice.getName(), Replica.Type.NRT, 2); 
+    String tempCollectionReplica2 = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(),
+        zkStateReader.getClusterState().getCollection(tempSourceCollectionName), tempSourceSlice.getName(), Replica.Type.NRT);
     props = new HashMap<>();
     props.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
     props.put(COLLECTION_PROP, tempSourceCollectionName);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
index 4d6e26d..4ba203c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
@@ -31,9 +31,12 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
+import org.apache.solr.update.UpdateLog;
+import org.apache.solr.util.TimeOut;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,20 +82,22 @@ public class MoveReplicaCmd implements Cmd{
             "Collection: " + collection + " replica: " + replicaName + " does not exist");
       }
     } else {
-      ocmh.checkRequired(message, SHARD_ID_PROP, "fromNode");
-      String fromNode = message.getStr("fromNode");
+      String sourceNode = message.getStr(CollectionParams.SOURCE_NODE, message.getStr(CollectionParams.FROM_NODE));
+      if (sourceNode == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sourceNode is a required param" );
+      }
       String shardId = message.getStr(SHARD_ID_PROP);
       Slice slice = clusterState.getCollection(collection).getSlice(shardId);
       List<Replica> sliceReplicas = new ArrayList<>(slice.getReplicas());
       Collections.shuffle(sliceReplicas, RANDOM);
       for (Replica r : slice.getReplicas()) {
-        if (r.getNodeName().equals(fromNode)) {
+        if (r.getNodeName().equals(sourceNode)) {
           replica = r;
         }
       }
       if (replica == null) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " node: " + fromNode + " do not have any replica belong to shard: " + shardId);
+            "Collection: " + collection + " node: " + sourceNode + " do not have any replica belong to shard: " + shardId);
       }
     }
 
@@ -114,35 +119,56 @@ public class MoveReplicaCmd implements Cmd{
 
   private void moveHdfsReplica(ClusterState clusterState, NamedList results, String dataDir, String targetNode, String async,
                                  DocCollection coll, Replica replica, Slice slice, int timeout) throws Exception {
-    String newCoreName = Assign.buildCoreName(coll, slice.getName(), replica.getType());
+    String skipCreateReplicaInClusterState = "true";
+    if (clusterState.getLiveNodes().contains(replica.getNodeName())) {
+      skipCreateReplicaInClusterState = "false";
+      ZkNodeProps removeReplicasProps = new ZkNodeProps(
+          COLLECTION_PROP, coll.getName(),
+          SHARD_ID_PROP, slice.getName(),
+          REPLICA_PROP, replica.getName()
+      );
+      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_DATA_DIR, false);
+      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_INDEX, false);
+      if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
+      NamedList deleteResult = new NamedList();
+      ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
+      if (deleteResult.get("failure") != null) {
+        String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s",
+            coll.getName(), slice.getName(), replica.getName());
+        log.warn(errorString);
+        results.add("failure", errorString + ", because of : " + deleteResult.get("failure"));
+        return;
+      }
+
+      TimeOut timeOut = new TimeOut(20L, TimeUnit.SECONDS);
+      while (!timeOut.hasTimedOut()) {
+        coll = ocmh.zkStateReader.getClusterState().getCollection(coll.getName());
+        if (coll.getReplica(replica.getName()) != null) {
+          Thread.sleep(100);
+        } else {
+          break;
+        }
+      }
+      if (timeOut.hasTimedOut()) {
+        results.add("failure", "Still see deleted replica in clusterstate!");
+        return;
+      }
 
-    ZkNodeProps removeReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        REPLICA_PROP, replica.getName()
-        );
-    removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_DATA_DIR, false);
-    removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_INDEX, false);
-    if(async!=null) removeReplicasProps.getProperties().put(ASYNC, async);
-    NamedList deleteResult = new NamedList();
-    ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
-    if (deleteResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s",
-          coll.getName(), slice.getName(), replica.getName());
-      log.warn(errorString);
-      results.add("failure", errorString + ", because of : " + deleteResult.get("failure"));
-      return;
     }
 
+    String ulogDir = replica.getStr(CoreAdminParams.ULOG_DIR);
     ZkNodeProps addReplicasProps = new ZkNodeProps(
         COLLECTION_PROP, coll.getName(),
         SHARD_ID_PROP, slice.getName(),
         CoreAdminParams.NODE, targetNode,
-        CoreAdminParams.NAME, newCoreName,
+        CoreAdminParams.CORE_NODE_NAME, replica.getName(),
+        CoreAdminParams.NAME, replica.getCoreName(),
+        SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, skipCreateReplicaInClusterState,
+        CoreAdminParams.ULOG_DIR, ulogDir.substring(0, ulogDir.lastIndexOf(UpdateLog.TLOG_NAME)),
         CoreAdminParams.DATA_DIR, dataDir);
     if(async!=null) addReplicasProps.getProperties().put(ASYNC, async);
     NamedList addResult = new NamedList();
-    ocmh.addReplica(clusterState, addReplicasProps, addResult, null);
+    ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, addResult, null);
     if (addResult.get("failure") != null) {
       String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
           " on node=%s", coll.getName(), slice.getName(), targetNode);
@@ -151,14 +177,14 @@ public class MoveReplicaCmd implements Cmd{
       return;
     } else {
       String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
-          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), newCoreName, targetNode);
+          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), replica.getCoreName(), targetNode);
       results.add("success", successString);
     }
   }
 
   private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
                                  DocCollection coll, Replica replica, Slice slice, int timeout) throws Exception {
-    String newCoreName = Assign.buildCoreName(coll, slice.getName(), replica.getType());
+    String newCoreName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), coll, slice.getName(), replica.getType());
     ZkNodeProps addReplicasProps = new ZkNodeProps(
         COLLECTION_PROP, coll.getName(),
         SHARD_ID_PROP, slice.getName(),

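
The delete-then-wait step added to moveHdfsReplica above is a plain poll-until-gone loop: re-fetch the DocCollection from ZkStateReader, sleep 100ms while the deleted replica is still visible, and record a failure once the 20-second TimeOut expires rather than re-creating the replica on top of stale cluster state. Here is the same pattern in isolation, as a sketch; await and its parameters are illustrative names, not Solr API:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public final class PollSketch {
  /** Polls until the condition holds or the timeout elapses; returns the final state. */
  public static boolean await(BooleanSupplier condition, long timeout, TimeUnit unit, long pollMs)
      throws InterruptedException {
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    while (System.nanoTime() < deadline) {
      if (condition.getAsBoolean()) return true; // condition satisfied before the deadline
      Thread.sleep(pollMs);                      // back off before re-reading cluster state
    }
    return condition.getAsBoolean();             // one last check at the deadline
  }
}

In the patch the condition is "the re-fetched collection no longer contains replica.getName()"; only after it holds does the command re-register the same core name and coreNodeName on the target node against the existing HDFS data and update-log directories.
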
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index 521434a..7dd85bc 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -142,7 +142,7 @@ public class Overseer implements Closeable {
           //TODO consider removing 'refreshClusterState' and simply check if clusterState is null
           if (refreshClusterState) {
             try {
-              reader.updateClusterState();
+              reader.forciblyRefreshAllClusterStateSlow();
               clusterState = reader.getClusterState();
               zkStateWriter = new ZkStateWriter(reader, stats);
               refreshClusterState = false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
index ba60908..ef3fd89 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
@@ -37,6 +37,7 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.zookeeper.KeeperException;
@@ -59,9 +60,11 @@ public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
   @Override
   public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
     ZkStateReader zkStateReader = ocmh.zkStateReader;
-    ocmh.checkRequired(message, "source", "target");
-    String source = message.getStr("source");
-    String target = message.getStr("target");
+    String source = message.getStr(CollectionParams.SOURCE_NODE, message.getStr("source"));
+    String target = message.getStr(CollectionParams.TARGET_NODE, message.getStr("target"));
+    if (source == null || target == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sourceNode and targetNode are required params" );
+    }
     String async = message.getStr("async");
     int timeout = message.getInt("timeout", 10 * 60); // 10 minutes
     boolean parallel = message.getBool("parallel", false);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
index 099190c..8c69f54 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
@@ -205,7 +205,7 @@ public class SplitShardCmd implements Cmd {
       for (int i = 0; i < subRanges.size(); i++) {
         String subSlice = slice + "_" + i;
         subSlices.add(subSlice);
-        String subShardName = Assign.buildCoreName(collectionName, subSlice, Replica.Type.NRT, 1);
+        String subShardName = Assign.buildCoreName(ocmh.zkStateReader.getZkClient(), collection, subSlice, Replica.Type.NRT);
         subShardNames.add(subShardName);
       }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index c8d763f..dee833f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -59,7 +59,6 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.BeforeReconnect;
 import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ClusterStateUtil;
 import org.apache.solr.common.cloud.DefaultConnectionStrategy;
 import org.apache.solr.common.cloud.DefaultZkACLProvider;
 import org.apache.solr.common.cloud.DefaultZkCredentialsProvider;
@@ -2184,10 +2183,7 @@ public class ZkController {
       DocCollection collection = clusterState.getCollectionOrNull(desc
           .getCloudDescriptor().getCollectionName());
       if (collection != null) {
-        boolean autoAddReplicas = ClusterStateUtil.isAutoAddReplicas(getZkStateReader(), collection.getName());
-        if (autoAddReplicas) {
-          CloudUtil.checkSharedFSFailoverReplaced(cc, desc);
-        }
+        CloudUtil.checkSharedFSFailoverReplaced(cc, desc);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index 9758c8f..82d106d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -240,7 +240,7 @@ public class ReplicaMutator {
         log.debug("node=" + coreNodeName + " is already registered");
       } else {
         // if coreNodeName is null, auto assign one
-        coreNodeName = Assign.assignNode(collection);
+        coreNodeName = Assign.assignNode(zkStateReader.getZkClient(), collection);
       }
       message.getProperties().put(ZkStateReader.CORE_NODE_NAME_PROP,
           coreNodeName);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
index 5724f17..a611932 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
@@ -66,7 +66,12 @@ public class SliceMutator {
       log.error("Invalid Collection/Slice {}/{} ", coll, slice);
       return ZkStateWriter.NO_OP;
     }
-    String coreNodeName = Assign.assignNode(collection);
+    String coreNodeName;
+    if (message.getStr(ZkStateReader.CORE_NODE_NAME_PROP) != null) {
+      coreNodeName = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
+    } else {
+      coreNodeName = Assign.assignNode(zkStateReader.getZkClient(), collection);
+    }
     Replica replica = new Replica(coreNodeName,
         makeMap(
             ZkStateReader.CORE_NAME_PROP, message.getStr(ZkStateReader.CORE_NAME_PROP),

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
index 5301370..8b8c740 100644
--- a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@@ -397,13 +397,12 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
     maxWriteMBPerSecRead = (Double) args.get("maxWriteMBPerSecRead");
     maxWriteMBPerSecDefault = (Double) args.get("maxWriteMBPerSecDefault");
 
-    dataHomePath = args.get(DATA_HOME) == null ? null : Paths.get((String) args.get(DATA_HOME));
-    if (dataHomePath == null && System.getProperty(DATA_HOME) != null && System.getProperty(DATA_HOME).length() > 0) {
-      // If solr.data.home is not specified explicitly in solrconfig.xml, fall back to global System Property with same name
-      dataHomePath = Paths.get(System.getProperty(DATA_HOME));
+    // override global config
+    if (args.get(SolrXmlConfig.SOLR_DATA_HOME) != null) {
+      dataHomePath = Paths.get((String) args.get(SolrXmlConfig.SOLR_DATA_HOME));
     }
     if (dataHomePath != null) {
-      log.info(DATA_HOME + "=" + dataHomePath);
+      log.info(SolrXmlConfig.SOLR_DATA_HOME + "=" + dataHomePath);
     }
   }
   

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 322952b..a43af96 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -607,7 +607,7 @@ public class CoreContainer {
                 zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
               }
 
-              core = create(cd, false, false);
+              core = createFromDescriptor(cd, false, false);
             } finally {
               if (asyncSolrCoreLoad) {
                 solrCores.markCoreAsNotLoading(cd);
@@ -901,14 +901,17 @@ public class CoreContainer {
         preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
       }
 
-      SolrCore core = create(cd, true, newCollection);
-
-      // only write out the descriptor if the core is successfully created
+      // Much of the logic in core handling presupposes that the core.properties file already exists, so create it
+      // first and clean it up if there's an error.
       coresLocator.create(this, cd);
 
+      SolrCore core = createFromDescriptor(cd, true, newCollection);
+
       return core;
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
+      // First clean up any core descriptor; there should never be an existing core.properties file for any core that
+      // failed to be created on-the-fly.
+      coresLocator.delete(this, cd);
       if (isZooKeeperAware() && !preExisitingZkEntry) {
         try {
           getZkController().unregister(coreName, cd);
@@ -948,7 +951,7 @@ public class CoreContainer {
    *
    * @return the newly created core
    */
-  private SolrCore create(CoreDescriptor dcore, boolean publishState, boolean newCollection) {
+  private SolrCore createFromDescriptor(CoreDescriptor dcore, boolean publishState, boolean newCollection) {
 
     if (isShutDown) {
       throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has been shutdown.");
@@ -1225,7 +1228,7 @@ public class CoreContainer {
     } else {
       CoreLoadFailure clf = coreInitFailures.get(name);
       if (clf != null) {
-        create(clf.cd, true, false);
+        createFromDescriptor(clf.cd, true, false);
       } else {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name );
       }
@@ -1408,7 +1411,7 @@ public class CoreContainer {
         if (zkSys.getZkController() != null) {
           zkSys.getZkController().throwErrorIfReplicaReplaced(desc);
         }
-        core = create(desc, true, false); // This should throw an error if it fails.
+        core = createFromDescriptor(desc, true, false); // This should throw an error if it fails.
       }
       core.open();
     }
@@ -1556,7 +1559,12 @@ public class CoreContainer {
   public long getStatus() {
     return status;
   }
-
+  
+  // Occasionally we need to access the transient cache handler in places other than CoreContainer.
+  public TransientSolrCoreCache getTransientCache() {
+    return solrCores.getTransientCacheHandler();
+  }
+  
 }
 
 class CloserThread extends Thread {

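
The reordering in CoreContainer around createFromDescriptor is deliberate: core.properties is persisted before the SolrCore is built, because downstream core handling assumes the descriptor file exists, and the catch block compensates by deleting the descriptor whenever creation fails. A self-contained sketch of that ordering, with stand-in types in place of CoreDescriptor, SolrCore and CoresLocator:

import java.io.IOException;

final class CreateOrderingSketch {
  interface Descriptor {}
  interface Core {}
  interface Locator { void create(Descriptor d) throws IOException; void delete(Descriptor d); }
  interface Factory { Core build(Descriptor d) throws Exception; }

  static Core create(Locator locator, Factory factory, Descriptor cd) throws Exception {
    locator.create(cd);    // persist core.properties up front; later logic assumes it exists
    try {
      return factory.build(cd);
    } catch (Exception ex) {
      locator.delete(cd);  // a failed on-the-fly create must not leave a stale descriptor
      throw ex;            // the real method also unregisters the core from ZooKeeper
    }
  }
}
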
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d00e53b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
index f60958d..e7e33a5 100644
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
@@ -55,8 +55,6 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
 
   protected static final String INDEX_W_TIMESTAMP_REGEX = "index\\.[0-9]{17}"; // see SnapShooter.DATE_FMT
 
-  public static final String DATA_HOME = "solr.data.home";
-
   // May be set by sub classes as data root, in which case getDataHome will use it as base
   protected Path dataHomePath;
 
@@ -394,6 +392,9 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
   
   public void initCoreContainer(CoreContainer cc) {
     this.coreContainer = cc;
+    if (cc != null && cc.getConfig() != null) {
+      this.dataHomePath = cc.getConfig().getSolrDataHome();
+    }
   }
   
   // special hack to work with FilterDirectory