Posted to commits@lucene.apache.org by ab...@apache.org on 2018/04/10 14:11:40 UTC

[01/50] lucene-solr:jira/solr-12181: LUCENE-8196: Check for a null input in LowpassIntervalsSource

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-12181 2f13a21f2 -> 751987d53


LUCENE-8196: Check for a null input in LowpassIntervalsSource


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7117b68d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7117b68d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7117b68d

Branch: refs/heads/jira/solr-12181
Commit: 7117b68db6835acfeda17f04ab2c20a8c1ec2c17
Parents: 34b83ed
Author: Alan Woodward <ro...@apache.org>
Authored: Wed Apr 4 09:37:32 2018 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Wed Apr 4 09:38:19 2018 +0100

----------------------------------------------------------------------
 .../java/org/apache/lucene/search/intervals/IntervalFilter.java   | 3 ++-
 .../apache/lucene/search/intervals/LowpassIntervalsSource.java    | 3 +++
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7117b68d/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalFilter.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalFilter.java b/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalFilter.java
index d1d2fcf..7571fc2 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalFilter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/intervals/IntervalFilter.java
@@ -18,6 +18,7 @@
 package org.apache.lucene.search.intervals;
 
 import java.io.IOException;
+import java.util.Objects;
 
 /**
  * Wraps an {@link IntervalIterator} and passes through those intervals that match the {@link #accept()} function
@@ -30,7 +31,7 @@ public abstract class IntervalFilter extends IntervalIterator {
    * Create a new filter
    */
   public IntervalFilter(IntervalIterator in) {
-    this.in = in;
+    this.in = Objects.requireNonNull(in);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7117b68d/lucene/sandbox/src/java/org/apache/lucene/search/intervals/LowpassIntervalsSource.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/intervals/LowpassIntervalsSource.java b/lucene/sandbox/src/java/org/apache/lucene/search/intervals/LowpassIntervalsSource.java
index 3bb469e..4d7846c 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/intervals/LowpassIntervalsSource.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/intervals/LowpassIntervalsSource.java
@@ -56,6 +56,9 @@ class LowpassIntervalsSource extends IntervalsSource {
   @Override
   public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
     IntervalIterator i = in.intervals(field, ctx);
+    if (i == null) {
+      return null;
+    }
     return new IntervalFilter(i) {
       @Override
       protected boolean accept() {

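For context: intervals(field, ctx) returns null when a segment has no data for the wrapped source, and before this commit LowpassIntervalsSource handed that null straight to IntervalFilter, deferring the failure to a later NullPointerException. A minimal sketch of the contract the fix restores, assuming the sandbox Intervals factory methods of this era (the field name and sources are illustrative, not from the commit):

    // Lowpass source: keep only intervals spanning at most 2 positions.
    IntervalsSource source = Intervals.maxwidth(2,
        Intervals.ordered(Intervals.term("quick"), Intervals.term("fox")));
    // ctx: a LeafReaderContext for some segment, assumed in scope.
    IntervalIterator it = source.intervals("body", ctx);
    if (it == null) {
      // With the fix, a segment with no matching intervals yields null here
      // instead of an IntervalFilter wrapping a null delegate; IntervalFilter
      // itself now rejects null eagerly via Objects.requireNonNull.
      return;
    }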

[13/50] lucene-solr:jira/solr-12181: LUCENE-8239: Add failing test with ignore flag

Posted by ab...@apache.org.
LUCENE-8239: Add failing test with ignore flag


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/508476e1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/508476e1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/508476e1

Branch: refs/heads/jira/solr-12181
Commit: 508476e1ad304598433acac71cac47ed92332ad5
Parents: 4a902f3
Author: Ignacio Vera <iv...@apache.org>
Authored: Thu Apr 5 15:33:55 2018 +0200
Committer: Ignacio Vera <iv...@apache.org>
Committed: Thu Apr 5 15:33:55 2018 +0200

----------------------------------------------------------------------
 .../lucene/spatial3d/geom/GeoPolygonTest.java   | 45 ++++++++++++++++++++
 1 file changed, 45 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/508476e1/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index f577901..fb32471 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -21,6 +21,7 @@ import java.util.List;
 import java.util.BitSet;
 import java.util.Collections;
 
+import org.junit.Ignore;
 import org.junit.Test;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -1427,5 +1428,49 @@ shape:
     int largeIntersection = solid.getRelationship(largeShape);
     assertTrue(intersection == largeIntersection);
   }
+
+  @Ignore
+  @Test
+  public void testComplexPolygonPlaneOutsideWorld() {
+    List<GeoPoint> points = new ArrayList<>();
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, 0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, -0.5));
+    GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
+    //Large polygon with arbitrary testPoint in set
+    GeoPolygon largePolygon = new GeoComplexPolygon(PlanetModel.SPHERE, Collections.singletonList(points), new GeoPoint(PlanetModel.SPHERE, 0.25, 0), true);
+    //This point is ok
+    GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-8);
+    assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1));
+    //This point is ok
+    point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-5);
+    assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1));
+    //Fails here
+    point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-7);
+    assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1));
+  }
+
+  @Ignore
+  @Test
+  public void testComplexPolygonDegeneratedVector() {
+    List<GeoPoint> points = new ArrayList<>();
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, -0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, -0.5, 0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5));
+    points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, -0.5));
+    final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
+    //Large polygon with test point in (0,0)
+    final GeoPolygon largePolygon = new GeoComplexPolygon(PlanetModel.SPHERE, Collections.singletonList(points), new GeoPoint(PlanetModel.SPHERE, 0.0, 0), true);
+    //Chooses plane Z and succeeds
+    final GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-5);
+    assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1));
+    //Numerically identical
+    final GeoPoint point2 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-13);
+    assertTrue(polygon.isWithin(point2) == largePolygon.isWithin(point2));
+    //Fails here, chooses plane X
+    final GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-6);
+    assertTrue(polygon.isWithin(point3) == largePolygon.isWithin(point3));
+  }
   
 }
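Both @Ignore'd tests repeat a single check: build the same unit square once through GeoPolygonFactory and once directly as a GeoComplexPolygon, then assert that isWithin() agrees for a probe point near the boundary. A sketch of that shared assertion as a helper (the helper itself is illustrative, not part of the commit):

    // Hypothetical helper capturing the comparison both ignored tests make.
    private static void assertWithinAgree(GeoPolygon standard, GeoPolygon complex,
                                          double lat, double lon) {
      GeoPoint probe = new GeoPoint(PlanetModel.SPHERE, lat, lon);
      // The tracked bug: for some near-boundary probes GeoComplexPolygon picks a
      // degenerate traversal plane and disagrees with the standard polygon.
      assertTrue(standard.isWithin(probe) == complex.isWithin(probe));
    }

The @Ignore flag keeps the failing cases committed and visible in the suite, to be re-enabled once the two implementations agree.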


[23/50] lucene-solr:jira/solr-12181: SOLR-12183: Refactor Streaming Expression test cases

Posted by ab...@apache.org.
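The hunks below remove a large block of per-decorator tests (unique, sort, null, merge, rank, having, fetch) from StreamExpressionTest as part of the SOLR-12183 refactor. The tests that remain all share one shape: parse a streaming expression, construct it through a StreamFactory, attach a StreamContext holding a SolrClientCache, and drain the tuples. A condensed sketch of that shape (the collection name, expression, and zkAddress are illustrative; getTuples is the helper used throughout this class):

    StreamFactory factory = new StreamFactory()
        .withCollectionZkHost("myCollection", zkAddress)   // zkAddress: assumed ZK connect string
        .withFunctionName("search", CloudSolrStream.class)
        .withFunctionName("unique", UniqueStream.class);
    StreamContext context = new StreamContext();
    SolrClientCache cache = new SolrClientCache();
    context.setSolrClientCache(cache);                     // share SolrJ clients across streams
    try {
      TupleStream stream = factory.constructStream(
          "unique(search(myCollection, q=*:*, fl=\"id,a_f\", sort=\"a_f asc\"), over=\"a_f\")");
      stream.setStreamContext(context);
      List<Tuple> tuples = getTuples(stream);              // open, read to EOF, close
    } finally {
      cache.close();                                       // always release the cached clients
    }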
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80375acb/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 7e8b6c6..4d88b4e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
-import org.apache.commons.math3.distribution.NormalDistribution;
-import org.apache.commons.math3.stat.inference.KolmogorovSmirnovTest;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
@@ -35,20 +33,6 @@ import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
 import org.apache.solr.client.solrj.io.comp.FieldComparator;
-import org.apache.solr.client.solrj.io.eval.AddEvaluator;
-import org.apache.solr.client.solrj.io.eval.AndEvaluator;
-import org.apache.solr.client.solrj.io.eval.EqualToEvaluator;
-import org.apache.solr.client.solrj.io.eval.GreaterThanEqualToEvaluator;
-import org.apache.solr.client.solrj.io.eval.GreaterThanEvaluator;
-import org.apache.solr.client.solrj.io.eval.IfThenElseEvaluator;
-import org.apache.solr.client.solrj.io.eval.LessThanEqualToEvaluator;
-import org.apache.solr.client.solrj.io.eval.LessThanEvaluator;
-import org.apache.solr.client.solrj.io.eval.NotEvaluator;
-import org.apache.solr.client.solrj.io.eval.OrEvaluator;
-import org.apache.solr.client.solrj.io.eval.RawValueEvaluator;
-import org.apache.solr.client.solrj.io.ops.ConcatOperation;
-import org.apache.solr.client.solrj.io.ops.GroupOperation;
-import org.apache.solr.client.solrj.io.ops.ReplaceOperation;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParser;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
@@ -68,12 +52,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-/**
- *  All base tests will be done with CloudSolrStream. Under the covers CloudSolrStream uses SolrStream so
- *  SolrStream will get fully exercised through these tests.
- *
- **/
-
 @Slow
 @LuceneTestCase.SuppressCodecs({"Lucene3x", "Lucene40","Lucene41","Lucene42","Lucene45"})
 public class StreamExpressionTest extends SolrCloudTestCase {
@@ -293,9 +271,6 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     }
   }
 
-
-
-
   @Test
   public void testCloudSolrStreamWithZkHost() throws Exception {
 
@@ -422,197 +397,6 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     assertOrder(tuples, 0, 2, 1, 3, 4);
   }
 
-  @Test
-  public void testUniqueStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-
-    StreamFactory factory = new StreamFactory()
-      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-      .withFunctionName("search", CloudSolrStream.class)
-      .withFunctionName("unique", UniqueStream.class);
-
-    try {
-      // Basic test
-      expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f\")");
-      stream = new UniqueStream(expression, factory);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 4);
-      assertOrder(tuples, 0, 1, 3, 4);
-
-      // Basic test desc
-      expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\"), over=\"a_f\")");
-      stream = new UniqueStream(expression, factory);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 4);
-      assertOrder(tuples, 4, 3, 1, 2);
-
-      // Basic w/multi comp
-      expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")");
-      stream = new UniqueStream(expression, factory);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 5);
-      assertOrder(tuples, 0, 2, 1, 3, 4);
-
-      // full factory w/multi comp
-      stream = factory.constructStream("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 5);
-      assertOrder(tuples, 0, 2, 1, 3, 4);
-    } finally {
-      solrClientCache.close();
-    }
-  }
-
-  @Test
-  public void testSortStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .add(id, "5", "a_s", "hello1", "a_i", "1", "a_f", "2")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-    try {
-      StreamFactory factory = new StreamFactory()
-          .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-          .withFunctionName("search", CloudSolrStream.class)
-          .withFunctionName("sort", SortStream.class);
-
-      // Basic test
-      stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-      assert (tuples.size() == 6);
-      assertOrder(tuples, 0, 1, 5, 2, 3, 4);
-
-      // Basic test desc
-      stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i desc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-      assert (tuples.size() == 6);
-      assertOrder(tuples, 4, 3, 2, 1, 5, 0);
-
-      // Basic w/multi comp
-      stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc, a_f desc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-      assert (tuples.size() == 6);
-      assertOrder(tuples, 0, 5, 1, 2, 3, 4);
-    } finally {
-      solrClientCache.close();
-    }
-  }
-
-
-  @Test
-  public void testNullStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .add(id, "5", "a_s", "hello1", "a_i", "1", "a_f", "2")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-    StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("null", NullStream.class);
-
-    try {
-      // Basic test
-      stream = factory.constructStream("null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-      assertTrue(tuples.size() == 1);
-      assertTrue(tuples.get(0).getLong("nullCount") == 6);
-    } finally {
-      solrClientCache.close();
-    }
-  }
-
-
-  @Test
-  public void testParallelNullStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .add(id, "5", "a_s", "hello1", "a_i", "1", "a_f", "2")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-
-    StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("null", NullStream.class)
-        .withFunctionName("parallel", ParallelStream.class);
-
-    try {
-
-      // Basic test
-      stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"nullCount desc\", null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), by=\"a_i asc\"))");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-      assertTrue(tuples.size() == 2);
-      long nullCount = 0;
-      for (Tuple t : tuples) {
-        nullCount += t.getLong("nullCount");
-      }
-
-      assertEquals(nullCount, 6L);
-    } finally {
-      solrClientCache.close();
-    }
-  }
 
   @Test
   public void testNulls() throws Exception {
@@ -684,174 +468,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     }
   }
 
-  @Test
-  public void testMergeStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    
-    StreamFactory factory = new StreamFactory()
-      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-      .withFunctionName("search", CloudSolrStream.class)
-      .withFunctionName("unique", UniqueStream.class)
-      .withFunctionName("merge", MergeStream.class);
-    
-    // Basic test
-    expression = StreamExpressionParser.parse("merge("
-        + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"),"
-        + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"),"
-        + "on=\"a_f asc\")");
-
-    stream = new MergeStream(expression, factory);
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-    try {
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 4);
-      assertOrder(tuples, 0, 1, 3, 4);
-
-      // Basic test desc
-      expression = StreamExpressionParser.parse("merge("
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\"),"
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\"),"
-          + "on=\"a_f desc\")");
-      stream = new MergeStream(expression, factory);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 4);
-      assertOrder(tuples, 4, 3, 1, 0);
-
-      // Basic w/multi comp
-      expression = StreamExpressionParser.parse("merge("
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
-          + "on=\"a_f asc, a_s asc\")");
-      stream = new MergeStream(expression, factory);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 5);
-      assertOrder(tuples, 0, 2, 1, 3, 4);
-
-      // full factory w/multi comp
-      stream = factory.constructStream("merge("
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
-          + "on=\"a_f asc, a_s asc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 5);
-      assertOrder(tuples, 0, 2, 1, 3, 4);
-
-      // full factory w/multi streams
-      stream = factory.constructStream("merge("
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
-          + "search(" + COLLECTIONORALIAS + ", q=\"id:(2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
-          + "on=\"a_f asc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 4);
-      assertOrder(tuples, 0, 2, 1, 4);
-    } finally {
-      solrClientCache.close();
-    }
-  }
-
-  @Test
-  public void testRankStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-
-    StreamFactory factory = new StreamFactory()
-      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-      .withFunctionName("search", CloudSolrStream.class)
-      .withFunctionName("unique", UniqueStream.class)
-      .withFunctionName("top", RankStream.class);
-    try {
-      // Basic test
-      expression = StreamExpressionParser.parse("top("
-          + "n=3,"
-          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"),"
-          + "sort=\"a_f asc, a_i asc\")");
-      stream = new RankStream(expression, factory);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 3);
-      assertOrder(tuples, 0, 2, 1);
-
-      // Basic test desc
-      expression = StreamExpressionParser.parse("top("
-          + "n=2,"
-          + "unique("
-          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\"),"
-          + "over=\"a_f\"),"
-          + "sort=\"a_f desc\")");
-      stream = new RankStream(expression, factory);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 2);
-      assertOrder(tuples, 4, 3);
-
-      // full factory
-      stream = factory.constructStream("top("
-          + "n=4,"
-          + "unique("
-          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"),"
-          + "over=\"a_f\"),"
-          + "sort=\"a_f asc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 4);
-      assertOrder(tuples, 0, 1, 3, 4);
-
-      // full factory, switch order
-      stream = factory.constructStream("top("
-          + "n=4,"
-          + "unique("
-          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\"),"
-          + "over=\"a_f\"),"
-          + "sort=\"a_f asc\")");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
 
-      assert (tuples.size() == 4);
-      assertOrder(tuples, 2, 1, 3, 4);
-    } finally {
-      solrClientCache.close();
-    }
-  }
 
   @Test
   public void testRandomStream() throws Exception {
@@ -983,8 +600,16 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     }
   }
 
+
+
+
+
+
+
+
+
   @Test
-  public void testReducerStream() throws Exception {
+  public void testStatsStream() throws Exception {
 
     new UpdateRequest()
         .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
@@ -998,8216 +623,1105 @@ public class StreamExpressionTest extends SolrCloudTestCase {
         .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
         .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
         .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-    
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    Tuple t0, t1, t2;
-    List<Map> maps0, maps1, maps2;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
 
     StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("reduce", ReducerStream.class)
-        .withFunctionName("group", GroupOperation.class);
-
+    .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+    .withFunctionName("stats", StatsStream.class)
+    .withFunctionName("sum", SumMetric.class)
+    .withFunctionName("min", MinMetric.class)
+    .withFunctionName("max", MaxMetric.class)
+    .withFunctionName("avg", MeanMetric.class)
+    .withFunctionName("count", CountMetric.class);     
+  
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache cache = new SolrClientCache();
     try {
-      // basic
-      expression = StreamExpressionParser.parse("reduce("
-          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f asc\"),"
-          + "by=\"a_s\","
-          + "group(sort=\"a_f desc\", n=\"4\"))");
-
+      streamContext.setSolrClientCache(cache);
+      String expr = "stats(" + COLLECTIONORALIAS + ", q=*:*, sum(a_i), sum(a_f), min(a_i), min(a_f), max(a_i), max(a_f), avg(a_i), avg(a_f), count(*))";
+      expression = StreamExpressionParser.parse(expr);
       stream = factory.constructStream(expression);
       stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 3);
-
-      t0 = tuples.get(0);
-      maps0 = t0.getMaps("group");
-      assertMaps(maps0, 9, 1, 2, 0);
-
-      t1 = tuples.get(1);
-      maps1 = t1.getMaps("group");
-      assertMaps(maps1, 8, 7, 5, 3);
 
-
-      t2 = tuples.get(2);
-      maps2 = t2.getMaps("group");
-      assertMaps(maps2, 6, 4);
-
-      // basic w/spaces
-      expression = StreamExpressionParser.parse("reduce("
-          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f       asc\"),"
-          + "by=\"a_s\"," +
-          "group(sort=\"a_i asc\", n=\"2\"))");
-      stream = factory.constructStream(expression);
-      stream.setStreamContext(streamContext);
       tuples = getTuples(stream);
 
-      assert (tuples.size() == 3);
-
-      t0 = tuples.get(0);
-      maps0 = t0.getMaps("group");
-      assert (maps0.size() == 2);
-
-      assertMaps(maps0, 0, 1);
-
-      t1 = tuples.get(1);
-      maps1 = t1.getMaps("group");
-      assertMaps(maps1, 3, 5);
-
-      t2 = tuples.get(2);
-      maps2 = t2.getMaps("group");
-      assertMaps(maps2, 4, 6);
-    } finally {
-      solrClientCache.close();
-    }
-  }
-
-
-  @Test
-  public void testHavingStream() throws Exception {
-
-    SolrClientCache solrClientCache = new SolrClientCache();
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
-        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
-        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
-        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
-        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
-        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
-        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
-        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+      assert (tuples.size() == 1);
 
-    TupleStream stream;
-    List<Tuple> tuples;
+      //Test Long and Double Sums
 
-    StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("having", HavingStream.class)
-        .withFunctionName("rollup", RollupStream.class)
-        .withFunctionName("sum", SumMetric.class)
-        .withFunctionName("and", AndEvaluator.class)
-        .withFunctionName("or", OrEvaluator.class)
-        .withFunctionName("not", NotEvaluator.class)
-        .withFunctionName("gt", GreaterThanEvaluator.class)
-        .withFunctionName("lt", LessThanEvaluator.class)
-        .withFunctionName("eq", EqualToEvaluator.class)
-        .withFunctionName("lteq", LessThanEqualToEvaluator.class)
-        .withFunctionName("gteq", GreaterThanEqualToEvaluator.class);
-
-    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), eq(a_i, 9))");
-    StreamContext context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+      Tuple tuple = tuples.get(0);
 
-    assert(tuples.size() == 1);
-    Tuple t = tuples.get(0);
-    assertTrue(t.getString("id").equals("9"));
+      Double sumi = tuple.getDouble("sum(a_i)");
+      Double sumf = tuple.getDouble("sum(a_f)");
+      Double mini = tuple.getDouble("min(a_i)");
+      Double minf = tuple.getDouble("min(a_f)");
+      Double maxi = tuple.getDouble("max(a_i)");
+      Double maxf = tuple.getDouble("max(a_f)");
+      Double avgi = tuple.getDouble("avg(a_i)");
+      Double avgf = tuple.getDouble("avg(a_f)");
+      Double count = tuple.getDouble("count(*)");
 
-    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), and(eq(a_i, 9),lt(a_i, 10)))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+      assertTrue(sumi.longValue() == 70);
+      assertTrue(sumf.doubleValue() == 55.0D);
+      assertTrue(mini.doubleValue() == 0.0D);
+      assertTrue(minf.doubleValue() == 1.0D);
+      assertTrue(maxi.doubleValue() == 14.0D);
+      assertTrue(maxf.doubleValue() == 10.0D);
+      assertTrue(avgi.doubleValue() == 7.0D);
+      assertTrue(avgf.doubleValue() == 5.5D);
+      assertTrue(count.doubleValue() == 10);
 
-    assert(tuples.size() == 1);
-    t = tuples.get(0);
-    assertTrue(t.getString("id").equals("9"));
 
-    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), or(eq(a_i, 9),eq(a_i, 8)))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+      //Test with shards parameter
+      List<String> shardUrls = TupleStream.getShards(cluster.getZkServer().getZkAddress(), COLLECTIONORALIAS, streamContext);
+      expr = "stats(myCollection, q=*:*, sum(a_i), sum(a_f), min(a_i), min(a_f), max(a_i), max(a_f), avg(a_i), avg(a_f), count(*))";
+      Map<String, List<String>> shardsMap = new HashMap();
+      shardsMap.put("myCollection", shardUrls);
+      StreamContext context = new StreamContext();
+      context.put("shards", shardsMap);
+      context.setSolrClientCache(cache);
+      stream = factory.constructStream(expr);
+      stream.setStreamContext(context);
 
-    assert(tuples.size() == 2);
-    t = tuples.get(0);
-    assertTrue(t.getString("id").equals("8"));
+      tuples = getTuples(stream);
 
-    t = tuples.get(1);
-    assertTrue(t.getString("id").equals("9"));
+      assert (tuples.size() == 1);
 
+      //Test Long and Double Sums
 
-    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), and(eq(a_i, 9),not(eq(a_i, 9))))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+      tuple = tuples.get(0);
 
-    assert(tuples.size() == 0);
+      sumi = tuple.getDouble("sum(a_i)");
+      sumf = tuple.getDouble("sum(a_f)");
+      mini = tuple.getDouble("min(a_i)");
+      minf = tuple.getDouble("min(a_f)");
+      maxi = tuple.getDouble("max(a_i)");
+      maxf = tuple.getDouble("max(a_f)");
+      avgi = tuple.getDouble("avg(a_i)");
+      avgf = tuple.getDouble("avg(a_f)");
+      count = tuple.getDouble("count(*)");
 
+      assertTrue(sumi.longValue() == 70);
+      assertTrue(sumf.doubleValue() == 55.0D);
+      assertTrue(mini.doubleValue() == 0.0D);
+      assertTrue(minf.doubleValue() == 1.0D);
+      assertTrue(maxi.doubleValue() == 14.0D);
+      assertTrue(maxf.doubleValue() == 10.0D);
+      assertTrue(avgi.doubleValue() == 7.0D);
+      assertTrue(avgf.doubleValue() == 5.5D);
+      assertTrue(count.doubleValue() == 10);
 
-    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), and(lteq(a_i, 9), gteq(a_i, 8)))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+      //Exercise the /stream handler
 
-    assert(tuples.size() == 2);
+      //Add the shards http parameter for myCollection
+      StringBuilder buf = new StringBuilder();
+      for (String shardUrl : shardUrls) {
+        if (buf.length() > 0) {
+          buf.append(",");
+        }
+        buf.append(shardUrl);
+      }
 
-    t = tuples.get(0);
-    assertTrue(t.getString("id").equals("8"));
+      ModifiableSolrParams solrParams = new ModifiableSolrParams();
+      solrParams.add("qt", "/stream");
+      solrParams.add("expr", expr);
+      solrParams.add("myCollection.shards", buf.toString());
+      SolrStream solrStream = new SolrStream(shardUrls.get(0), solrParams);
+      tuples = getTuples(solrStream);
+      assert (tuples.size() == 1);
 
-    t = tuples.get(1);
-    assertTrue(t.getString("id").equals("9"));
+      tuple = tuples.get(0);
 
-    stream = factory.constructStream("having(rollup(over=a_f, sum(a_i), search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\")), and(eq(sum(a_i), 9),eq(sum(a_i), 9)))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+      sumi = tuple.getDouble("sum(a_i)");
+      sumf = tuple.getDouble("sum(a_f)");
+      mini = tuple.getDouble("min(a_i)");
+      minf = tuple.getDouble("min(a_f)");
+      maxi = tuple.getDouble("max(a_i)");
+      maxf = tuple.getDouble("max(a_f)");
+      avgi = tuple.getDouble("avg(a_i)");
+      avgf = tuple.getDouble("avg(a_f)");
+      count = tuple.getDouble("count(*)");
 
-    assert(tuples.size() == 1);
-    t = tuples.get(0);
-    assertTrue(t.getDouble("a_f") == 10.0D);
+      assertTrue(sumi.longValue() == 70);
+      assertTrue(sumf.doubleValue() == 55.0D);
+      assertTrue(mini.doubleValue() == 0.0D);
+      assertTrue(minf.doubleValue() == 1.0D);
+      assertTrue(maxi.doubleValue() == 14.0D);
+      assertTrue(maxf.doubleValue() == 10.0D);
+      assertTrue(avgi.doubleValue() == 7.0D);
+      assertTrue(avgf.doubleValue() == 5.5D);
+      assertTrue(count.doubleValue() == 10);
+      //Add a negative test to prove that it cannot find slices if the shards parameter is removed
 
-    solrClientCache.close();
+      try {
+        ModifiableSolrParams solrParamsBad = new ModifiableSolrParams();
+        solrParamsBad.add("qt", "/stream");
+        solrParamsBad.add("expr", expr);
+        solrStream = new SolrStream(shardUrls.get(0), solrParamsBad);
+        tuples = getTuples(solrStream);
+        throw new Exception("Exception should have been thrown above");
+      } catch (IOException e) {
+        assertTrue(e.getMessage().contains("Collection not found: myCollection"));
+      }
+    } finally {
+      cache.close();
+    }
   }
 
-
   @Test
-  public void testParallelHavingStream() throws Exception {
-
-    SolrClientCache solrClientCache = new SolrClientCache();
+  public void testFacetStream() throws Exception {
 
     new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
-        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
-        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
-        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
-        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
-        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
-        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
-        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
+        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
+        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
+        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
+        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
+        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
         .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
+    
+    String clause;
     TupleStream stream;
     List<Tuple> tuples;
-
+    
     StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("having", HavingStream.class)
-        .withFunctionName("rollup", RollupStream.class)
-        .withFunctionName("sum", SumMetric.class)
-        .withFunctionName("and", AndEvaluator.class)
-        .withFunctionName("or", OrEvaluator.class)
-        .withFunctionName("not", NotEvaluator.class)
-        .withFunctionName("gt", GreaterThanEvaluator.class)
-        .withFunctionName("lt", LessThanEvaluator.class)
-        .withFunctionName("eq", EqualToEvaluator.class)
-        .withFunctionName("lteq", LessThanEqualToEvaluator.class)
-        .withFunctionName("gteq", GreaterThanEqualToEvaluator.class)
-        .withFunctionName("val", RawValueEvaluator.class)
-        .withFunctionName("parallel", ParallelStream.class);
-
-    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), eq(a_i, 9)))");
-    StreamContext context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
-
-    assert(tuples.size() == 1);
-    Tuple t = tuples.get(0);
-    assertTrue(t.getString("id").equals("9"));
-
-    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), and(eq(a_i, 9),lt(a_i, 10))))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
+      .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress())
+      .withFunctionName("facet", FacetStream.class)
+      .withFunctionName("sum", SumMetric.class)
+      .withFunctionName("min", MinMetric.class)
+      .withFunctionName("max", MaxMetric.class)
+      .withFunctionName("avg", MeanMetric.class)
+      .withFunctionName("count", CountMetric.class);
+    
+    // Basic test
+    clause = "facet("
+              +   "collection1, "
+              +   "q=\"*:*\", "
+              +   "fl=\"a_s,a_i,a_f\", "
+              +   "sort=\"a_s asc\", "
+              +   "buckets=\"a_s\", "
+              +   "bucketSorts=\"sum(a_i) asc\", "
+              +   "bucketSizeLimit=100, "
+              +   "sum(a_i), sum(a_f), "
+              +   "min(a_i), min(a_f), "
+              +   "max(a_i), max(a_f), "
+              +   "avg(a_i), avg(a_f), "
+              +   "count(*)"
+              + ")";
+    
+    stream = factory.constructStream(clause);
     tuples = getTuples(stream);
 
-    assert(tuples.size() == 1);
-    t = tuples.get(0);
-    assertTrue(t.getString("id").equals("9"));
+    assert(tuples.size() == 3);
 
-    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\",having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), or(eq(a_i, 9),eq(a_i, 8))))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+    //Test Long and Double Sums
 
-    assert(tuples.size() == 2);
-    t = tuples.get(0);
-    assertTrue(t.getString("id").equals("8"));
+    Tuple tuple = tuples.get(0);
+    String bucket = tuple.getString("a_s");
+    Double sumi = tuple.getDouble("sum(a_i)");
+    Double sumf = tuple.getDouble("sum(a_f)");
+    Double mini = tuple.getDouble("min(a_i)");
+    Double minf = tuple.getDouble("min(a_f)");
+    Double maxi = tuple.getDouble("max(a_i)");
+    Double maxf = tuple.getDouble("max(a_f)");
+    Double avgi = tuple.getDouble("avg(a_i)");
+    Double avgf = tuple.getDouble("avg(a_f)");
+    Double count = tuple.getDouble("count(*)");
 
-    t = tuples.get(1);
-    assertTrue(t.getString("id").equals("9"));
+    assertTrue(bucket.equals("hello4"));
+    assertTrue(sumi.longValue() == 15);
+    assertTrue(sumf.doubleValue() == 11.0D);
+    assertTrue(mini.doubleValue() == 4.0D);
+    assertTrue(minf.doubleValue() == 4.0D);
+    assertTrue(maxi.doubleValue() == 11.0D);
+    assertTrue(maxf.doubleValue() == 7.0D);
+    assertTrue(avgi.doubleValue() == 7.5D);
+    assertTrue(avgf.doubleValue() == 5.5D);
+    assertTrue(count.doubleValue() == 2);
 
+    tuple = tuples.get(1);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), and(eq(a_i, 9),not(eq(a_i, 9)))))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
-
-    assert(tuples.size() == 0);
+    assertTrue(bucket.equals("hello0"));
+    assertTrue(sumi.doubleValue() == 17.0D);
+    assertTrue(sumf.doubleValue() == 18.0D);
+    assertTrue(mini.doubleValue() == 0.0D);
+    assertTrue(minf.doubleValue() == 1.0D);
+    assertTrue(maxi.doubleValue() == 14.0D);
+    assertTrue(maxf.doubleValue() == 10.0D);
+    assertTrue(avgi.doubleValue() == 4.25D);
+    assertTrue(avgf.doubleValue() == 4.5D);
+    assertTrue(count.doubleValue() == 4);
 
+    tuple = tuples.get(2);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\",having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), and(lteq(a_i, 9), gteq(a_i, 8))))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+    assertTrue(bucket.equals("hello3"));
+    assertTrue(sumi.doubleValue() == 38.0D);
+    assertTrue(sumf.doubleValue() == 26.0D);
+    assertTrue(mini.doubleValue() == 3.0D);
+    assertTrue(minf.doubleValue() == 3.0D);
+    assertTrue(maxi.doubleValue() == 13.0D);
+    assertTrue(maxf.doubleValue() == 9.0D);
+    assertTrue(avgi.doubleValue() == 9.5D);
+    assertTrue(avgf.doubleValue() == 6.5D);
+    assertTrue(count.doubleValue() == 4);
 
-    assert(tuples.size() == 2);
 
-    t = tuples.get(0);
-    assertTrue(t.getString("id").equals("8"));
+    //Reverse the Sort.
 
-    t = tuples.get(1);
-    assertTrue(t.getString("id").equals("9"));
+    clause = "facet("
+        +   "collection1, "
+        +   "q=\"*:*\", "
+        +   "fl=\"a_s,a_i,a_f\", "
+        +   "sort=\"a_s asc\", "
+        +   "buckets=\"a_s\", "
+        +   "bucketSorts=\"sum(a_i) desc\", "
+        +   "bucketSizeLimit=100, "
+        +   "sum(a_i), sum(a_f), "
+        +   "min(a_i), min(a_f), "
+        +   "max(a_i), max(a_f), "
+        +   "avg(a_i), avg(a_f), "
+        +   "count(*)"
+        + ")";
 
-    stream = factory.constructStream("parallel("+COLLECTIONORALIAS+", workers=2, sort=\"a_f asc\", having(rollup(over=a_f, sum(a_i), search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=a_f)), and(eq(sum(a_i), 9),eq(sum(a_i),9))))");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
+    stream = factory.constructStream(clause);
     tuples = getTuples(stream);
 
-    assert(tuples.size() == 1);
 
-    t = tuples.get(0);
-    assertTrue(t.getDouble("a_f") == 10.0D);
-
-    solrClientCache.close();
-  }
+    //Test Long and Double Sums
 
-  @Test
-  public void testFetchStream() throws Exception {
+    tuple = tuples.get(0);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-    SolrClientCache solrClientCache = new SolrClientCache();//TODO share in @Before ; close in @After ?
+    assertTrue(bucket.equals("hello3"));
+    assertTrue(sumi.doubleValue() == 38.0D);
+    assertTrue(sumf.doubleValue() == 26.0D);
+    assertTrue(mini.doubleValue() == 3.0D);
+    assertTrue(minf.doubleValue() == 3.0D);
+    assertTrue(maxi.doubleValue() == 13.0D);
+    assertTrue(maxf.doubleValue() == 9.0D);
+    assertTrue(avgi.doubleValue() == 9.5D);
+    assertTrue(avgf.doubleValue() == 6.5D);
+    assertTrue(count.doubleValue() == 4);
 
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
-        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
-        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
-        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
-        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
-        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
-        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
-        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+    tuple = tuples.get(1);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-    TupleStream stream;
-    List<Tuple> tuples;
+    assertTrue(bucket.equals("hello0"));
+    assertTrue(sumi.doubleValue() == 17.0D);
+    assertTrue(sumf.doubleValue() == 18.0D);
+    assertTrue(mini.doubleValue() == 0.0D);
+    assertTrue(minf.doubleValue() == 1.0D);
+    assertTrue(maxi.doubleValue() == 14.0D);
+    assertTrue(maxf.doubleValue() == 10.0D);
+    assertTrue(avgi.doubleValue() == 4.25D);
+    assertTrue(avgf.doubleValue() == 4.5D);
+    assertTrue(count.doubleValue() == 4);
 
-    StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("fetch", FetchStream.class);
+    tuple = tuples.get(2);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-    stream = factory.constructStream("fetch("+ COLLECTIONORALIAS +",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), on=\"id=a_i\", batchSize=\"2\", fl=\"subject\")");
-    StreamContext context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
+    assertTrue(bucket.equals("hello4"));
+    assertTrue(sumi.longValue() == 15);
+    assertTrue(sumf.doubleValue() == 11.0D);
+    assertTrue(mini.doubleValue() == 4.0D);
+    assertTrue(minf.doubleValue() == 4.0D);
+    assertTrue(maxi.doubleValue() == 11.0D);
+    assertTrue(maxf.doubleValue() == 7.0D);
+    assertTrue(avgi.doubleValue() == 7.5D);
+    assertTrue(avgf.doubleValue() == 5.5D);
+    assertTrue(count.doubleValue() == 2);
 
-    assert(tuples.size() == 10);
-    Tuple t = tuples.get(0);
-    assertTrue("blah blah blah 0".equals(t.getString("subject")));
-    t = tuples.get(1);
-    assertTrue("blah blah blah 2".equals(t.getString("subject")));
-    t = tuples.get(2);
-    assertTrue("blah blah blah 3".equals(t.getString("subject")));
-    t = tuples.get(3);
-    assertTrue("blah blah blah 4".equals(t.getString("subject")));
-    t = tuples.get(4);
-    assertTrue("blah blah blah 1".equals(t.getString("subject")));
-    t = tuples.get(5);
-    assertTrue("blah blah blah 5".equals(t.getString("subject")));
-    t = tuples.get(6);
-    assertTrue("blah blah blah 6".equals(t.getString("subject")));
-    t = tuples.get(7);
-    assertTrue("blah blah blah 7".equals(t.getString("subject")));
-    t = tuples.get(8);
-    assertTrue("blah blah blah 8".equals(t.getString("subject")));
-    t = tuples.get(9);
-    assertTrue("blah blah blah 9".equals(t.getString("subject")));
-
-    //Change the batch size
-    stream = factory.constructStream("fetch(" + COLLECTIONORALIAS + ",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), on=\"id=a_i\", batchSize=\"3\", fl=\"subject\")");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
-    tuples = getTuples(stream);
 
-    assert(tuples.size() == 10);
-    t = tuples.get(0);
-    assertTrue("blah blah blah 0".equals(t.getString("subject")));
-    t = tuples.get(1);
-    assertTrue("blah blah blah 2".equals(t.getString("subject")));
-    t = tuples.get(2);
-    assertTrue("blah blah blah 3".equals(t.getString("subject")));
-    t = tuples.get(3);
-    assertTrue("blah blah blah 4".equals(t.getString("subject")));
-    t = tuples.get(4);
-    assertTrue("blah blah blah 1".equals(t.getString("subject")));
-    t = tuples.get(5);
-    assertTrue("blah blah blah 5".equals(t.getString("subject")));
-    t = tuples.get(6);
-    assertTrue("blah blah blah 6".equals(t.getString("subject")));
-    t = tuples.get(7);
-    assertTrue("blah blah blah 7".equals(t.getString("subject")));
-    t = tuples.get(8);
-    assertTrue("blah blah blah 8".equals(t.getString("subject")));
-    t = tuples.get(9);
-    assertTrue("blah blah blah 9".equals(t.getString("subject")));
-
-    // SOLR-10404 test that "hello 99" as a value gets escaped
-    new UpdateRequest()
-        .add(id, "99", "a1_s", "hello 99", "a2_s", "hello 99", "subject", "blah blah blah 99")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+    //Test index sort
+    clause = "facet("
+        +   "collection1, "
+        +   "q=\"*:*\", "
+        +   "fl=\"a_s,a_i,a_f\", "
+        +   "sort=\"a_s asc\", "
+        +   "buckets=\"a_s\", "
+        +   "bucketSorts=\"a_s desc\", "
+        +   "bucketSizeLimit=100, "
+        +   "sum(a_i), sum(a_f), "
+        +   "min(a_i), min(a_f), "
+        +   "max(a_i), max(a_f), "
+        +   "avg(a_i), avg(a_f), "
+        +   "count(*)"
+        + ")";
 
-    stream = factory.constructStream("fetch("+ COLLECTIONORALIAS +",  search(" + COLLECTIONORALIAS + ", q=" + id + ":99, fl=\"id,a1_s\", sort=\"id asc\"), on=\"a1_s=a2_s\", fl=\"subject\")");
-    context = new StreamContext();
-    context.setSolrClientCache(solrClientCache);
-    stream.setStreamContext(context);
+    stream = factory.constructStream(clause);
     tuples = getTuples(stream);
 
-    assertEquals(1, tuples.size());
-    t = tuples.get(0);
-    assertTrue("blah blah blah 99".equals(t.getString("subject")));
-
-    solrClientCache.close();
-  }
+    assert(tuples.size() == 3);
 
-  @Test
-  public void testParallelFetchStream() throws Exception {
 
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
-        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
-        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
-        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
-        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
-        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
-        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
-        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+    tuple = tuples.get(0);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
 
-    TupleStream stream;
-    List<Tuple> tuples;
+    assertTrue(bucket.equals("hello4"));
+    assertTrue(sumi.longValue() == 15);
+    assertTrue(sumf.doubleValue() == 11.0D);
+    assertTrue(mini.doubleValue() == 4.0D);
+    assertTrue(minf.doubleValue() == 4.0D);
+    assertTrue(maxi.doubleValue() == 11.0D);
+    assertTrue(maxf.doubleValue() == 7.0D);
+    assertTrue(avgi.doubleValue() == 7.5D);
+    assertTrue(avgf.doubleValue() == 5.5D);
+    assertTrue(count.doubleValue() == 2);
 
-    StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("parallel", ParallelStream.class)
-        .withFunctionName("fetch", FetchStream.class);
 
-    try {
+    tuple = tuples.get(1);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-      stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"2\", fl=\"subject\"))");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
+    assertTrue(bucket.equals("hello3"));
+    assertTrue(sumi.doubleValue() == 38.0D);
+    assertTrue(sumf.doubleValue() == 26.0D);
+    assertTrue(mini.doubleValue() == 3.0D);
+    assertTrue(minf.doubleValue() == 3.0D);
+    assertTrue(maxi.doubleValue() == 13.0D);
+    assertTrue(maxf.doubleValue() == 9.0D);
+    assertTrue(avgi.doubleValue() == 9.5D);
+    assertTrue(avgf.doubleValue() == 6.5D);
+    assertTrue(count.doubleValue() == 4);
 
-      assert (tuples.size() == 10);
-      Tuple t = tuples.get(0);
-      assertTrue("blah blah blah 0".equals(t.getString("subject")));
-      t = tuples.get(1);
-      assertTrue("blah blah blah 2".equals(t.getString("subject")));
-      t = tuples.get(2);
-      assertTrue("blah blah blah 3".equals(t.getString("subject")));
-      t = tuples.get(3);
-      assertTrue("blah blah blah 4".equals(t.getString("subject")));
-      t = tuples.get(4);
-      assertTrue("blah blah blah 1".equals(t.getString("subject")));
-      t = tuples.get(5);
-      assertTrue("blah blah blah 5".equals(t.getString("subject")));
-      t = tuples.get(6);
-      assertTrue("blah blah blah 6".equals(t.getString("subject")));
-      t = tuples.get(7);
-      assertTrue("blah blah blah 7".equals(t.getString("subject")));
-      t = tuples.get(8);
-      assertTrue("blah blah blah 8".equals(t.getString("subject")));
-      t = tuples.get(9);
-      assertTrue("blah blah blah 9".equals(t.getString("subject")));
-
-
-      stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"3\", fl=\"subject\"))");
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
 
-      assert (tuples.size() == 10);
-      t = tuples.get(0);
-      assertTrue("blah blah blah 0".equals(t.getString("subject")));
-      t = tuples.get(1);
-      assertTrue("blah blah blah 2".equals(t.getString("subject")));
-      t = tuples.get(2);
-      assertTrue("blah blah blah 3".equals(t.getString("subject")));
-      t = tuples.get(3);
-      assertTrue("blah blah blah 4".equals(t.getString("subject")));
-      t = tuples.get(4);
-      assertTrue("blah blah blah 1".equals(t.getString("subject")));
-      t = tuples.get(5);
-      assertTrue("blah blah blah 5".equals(t.getString("subject")));
-      t = tuples.get(6);
-      assertTrue("blah blah blah 6".equals(t.getString("subject")));
-      t = tuples.get(7);
-      assertTrue("blah blah blah 7".equals(t.getString("subject")));
-      t = tuples.get(8);
-      assertTrue("blah blah blah 8".equals(t.getString("subject")));
-      t = tuples.get(9);
-      assertTrue("blah blah blah 9".equals(t.getString("subject")));
-    } finally {
-      solrClientCache.close();
-    }
-  }
+    tuple = tuples.get(2);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
+    assertTrue(bucket.equals("hello0"));
+    assertTrue(sumi.doubleValue() == 17.0D);
+    assertTrue(sumf.doubleValue() == 18.0D);
+    assertTrue(mini.doubleValue() == 0.0D);
+    assertTrue(minf.doubleValue() == 1.0D);
+    assertTrue(maxi.doubleValue() == 14.0D);
+    assertTrue(maxf.doubleValue() == 10.0D);
+    assertTrue(avgi.doubleValue() == 4.25D);
+    assertTrue(avgf.doubleValue() == 4.5D);
+    assertTrue(count.doubleValue() == 4);
 
+    //Test index sort (bucketSorts="a_s asc")
 
+    clause = "facet("
+        +   "collection1, "
+        +   "q=\"*:*\", "
+        +   "fl=\"a_s,a_i,a_f\", "
+        +   "sort=\"a_s asc\", "
+        +   "buckets=\"a_s\", "
+        +   "bucketSorts=\"a_s asc\", "
+        +   "bucketSizeLimit=100, "
+        +   "sum(a_i), sum(a_f), "
+        +   "min(a_i), min(a_f), "
+        +   "max(a_i), max(a_f), "
+        +   "avg(a_i), avg(a_f), "
+        +   "count(*)"
+        + ")";
 
+    stream = factory.constructStream(clause);
+    tuples = getTuples(stream);
 
-  @Test
-  public void testDaemonStream() throws Exception {
+    assert(tuples.size() == 3);
 
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
-        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
-        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
-        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
-        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
-        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
-        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
 
-    StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("rollup", RollupStream.class)
-        .withFunctionName("sum", SumMetric.class)
-        .withFunctionName("min", MinMetric.class)
-        .withFunctionName("max", MaxMetric.class)
-        .withFunctionName("avg", MeanMetric.class)
-        .withFunctionName("count", CountMetric.class)
-        .withFunctionName("daemon", DaemonStream.class);
-
-    StreamExpression expression;
-    DaemonStream daemonStream;
-
-    expression = StreamExpressionParser.parse("daemon(rollup("
-        + "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"a_i,a_s\", sort=\"a_s asc\"),"
-        + "over=\"a_s\","
-        + "sum(a_i)"
-        + "), id=\"test\", runInterval=\"1000\", queueSize=\"9\")");
-    daemonStream = (DaemonStream)factory.constructStream(expression);
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-    daemonStream.setStreamContext(streamContext);
-    try {
-      //Test Long and Double Sums
-
-      daemonStream.open(); // This will start the daemon thread
-
-      for (int i = 0; i < 4; i++) {
-        Tuple tuple = daemonStream.read(); // Reads from the queue
-        String bucket = tuple.getString("a_s");
-        Double sumi = tuple.getDouble("sum(a_i)");
-
-        //System.out.println("#################################### Bucket 1:"+bucket);
-        assertTrue(bucket.equals("hello0"));
-        assertTrue(sumi.doubleValue() == 17.0D);
-
-        tuple = daemonStream.read();
-        bucket = tuple.getString("a_s");
-        sumi = tuple.getDouble("sum(a_i)");
-
-        //System.out.println("#################################### Bucket 2:"+bucket);
-        assertTrue(bucket.equals("hello3"));
-        assertTrue(sumi.doubleValue() == 38.0D);
-
-        tuple = daemonStream.read();
-        bucket = tuple.getString("a_s");
-        sumi = tuple.getDouble("sum(a_i)");
-        //System.out.println("#################################### Bucket 3:"+bucket);
-        assertTrue(bucket.equals("hello4"));
-        assertTrue(sumi.longValue() == 15);
-      }
-
-      //Now let's wait until the internal queue fills up
-
-      while (daemonStream.remainingCapacity() > 0) {
-        try {
-          Thread.sleep(1000);
-        } catch (Exception e) {
+    tuple = tuples.get(0);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-        }
-      }
+    assertTrue(bucket.equals("hello0"));
+    assertTrue(sumi.doubleValue() == 17.0D);
+    assertTrue(sumf.doubleValue() == 18.0D);
+    assertTrue(mini.doubleValue() == 0.0D);
+    assertTrue(minf.doubleValue() == 1.0D);
+    assertTrue(maxi.doubleValue() == 14.0D);
+    assertTrue(maxf.doubleValue() == 10.0D);
+    assertTrue(avgi.doubleValue() == 4.25D);
+    assertTrue(avgf.doubleValue() == 4.5D);
+    assertTrue(count.doubleValue() == 4);
 
-      //OK capacity is full, let's index a new doc
 
-      new UpdateRequest()
-          .add(id, "10", "a_s", "hello0", "a_i", "1", "a_f", "10")
-          .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+    tuple = tuples.get(1);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-      //Now let's clear the 9 existing docs in the queue, plus 3 more to get past the run that was blocked. The next run should
-      //have the tuples with the updated count.
-      for (int i = 0; i < 12; i++) {
-        daemonStream.read();
-      }
+    assertTrue(bucket.equals("hello3"));
+    assertTrue(sumi.doubleValue() == 38.0D);
+    assertTrue(sumf.doubleValue() == 26.0D);
+    assertTrue(mini.doubleValue() == 3.0D);
+    assertTrue(minf.doubleValue() == 3.0D);
+    assertTrue(maxi.doubleValue() == 13.0D);
+    assertTrue(maxf.doubleValue() == 9.0D);
+    assertTrue(avgi.doubleValue() == 9.5D);
+    assertTrue(avgf.doubleValue() == 6.5D);
+    assertTrue(count.doubleValue() == 4);
 
-      //And rerun the loop. It should have a new count for hello0
-      for (int i = 0; i < 4; i++) {
-        Tuple tuple = daemonStream.read(); // Reads from the queue
-        String bucket = tuple.getString("a_s");
-        Double sumi = tuple.getDouble("sum(a_i)");
-
-        //System.out.println("#################################### Bucket 1:"+bucket);
-        assertTrue(bucket.equals("hello0"));
-        assertTrue(sumi.doubleValue() == 18.0D);
-
-        tuple = daemonStream.read();
-        bucket = tuple.getString("a_s");
-        sumi = tuple.getDouble("sum(a_i)");
-
-        //System.out.println("#################################### Bucket 2:"+bucket);
-        assertTrue(bucket.equals("hello3"));
-        assertTrue(sumi.doubleValue() == 38.0D);
-
-        tuple = daemonStream.read();
-        bucket = tuple.getString("a_s");
-        sumi = tuple.getDouble("sum(a_i)");
-        //System.out.println("#################################### Bucket 3:"+bucket);
-        assertTrue(bucket.equals("hello4"));
-        assertTrue(sumi.longValue() == 15);
-      }
-    } finally {
-      daemonStream.close(); //This should stop the daemon thread
-      solrClientCache.close();
-    }
-  }
 
+    tuple = tuples.get(2);
+    bucket = tuple.getString("a_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    sumf = tuple.getDouble("sum(a_f)");
+    mini = tuple.getDouble("min(a_i)");
+    minf = tuple.getDouble("min(a_f)");
+    maxi = tuple.getDouble("max(a_i)");
+    maxf = tuple.getDouble("max(a_f)");
+    avgi = tuple.getDouble("avg(a_i)");
+    avgf = tuple.getDouble("avg(a_f)");
+    count = tuple.getDouble("count(*)");
 
-  @Test
-  public void testTerminatingDaemonStream() throws Exception {
-    Assume.assumeTrue(!useAlias);
+    assertTrue(bucket.equals("hello4"));
+    assertTrue(sumi.longValue() == 15);
+    assertTrue(sumf.doubleValue() == 11.0D);
+    assertTrue(mini.doubleValue() == 4.0D);
+    assertTrue(minf.doubleValue() == 4.0D);
+    assertTrue(maxi.doubleValue() == 11.0D);
+    assertTrue(maxf.doubleValue() == 7.0D);
+    assertTrue(avgi.doubleValue() == 7.5D);
+    assertTrue(avgf.doubleValue() == 5.5D);
+    assertTrue(count.doubleValue() == 2);
 
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello", "a_i", "0", "a_f", "1")
-        .add(id, "2", "a_s", "hello", "a_i", "2", "a_f", "2")
-        .add(id, "3", "a_s", "hello", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello", "a_i", "1", "a_f", "5")
-        .add(id, "5", "a_s", "hello", "a_i", "10", "a_f", "6")
-        .add(id, "6", "a_s", "hello", "a_i", "11", "a_f", "7")
-        .add(id, "7", "a_s", "hello", "a_i", "12", "a_f", "8")
-        .add(id, "8", "a_s", "hello", "a_i", "13", "a_f", "9")
-        .add(id, "9", "a_s", "hello", "a_i", "14", "a_f", "10")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+    //Test zero result facets
+    clause = "facet("
+        +   "collection1, "
+        +   "q=\"blahhh\", "
+        +   "fl=\"a_s,a_i,a_f\", "
+        +   "sort=\"a_s asc\", "
+        +   "buckets=\"a_s\", "
+        +   "bucketSorts=\"a_s asc\", "
+        +   "bucketSizeLimit=100, "
+        +   "sum(a_i), sum(a_f), "
+        +   "min(a_i), min(a_f), "
+        +   "max(a_i), max(a_f), "
+        +   "avg(a_i), avg(a_f), "
+        +   "count(*)"
+        + ")";
 
-    StreamFactory factory = new StreamFactory()
-        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-        .withFunctionName("topic", TopicStream.class)
-        .withFunctionName("daemon", DaemonStream.class);
+    stream = factory.constructStream(clause);
+    tuples = getTuples(stream);
 
-    StreamExpression expression;
-    DaemonStream daemonStream;
+    assert(tuples.size() == 0);
 
-    SolrClientCache cache = new SolrClientCache();
-    StreamContext context = new StreamContext();
-    context.setSolrClientCache(cache);
-    expression = StreamExpressionParser.parse("daemon(topic("+ COLLECTIONORALIAS +","+ COLLECTIONORALIAS +", q=\"a_s:hello\", initialCheckpoint=0, id=\"topic1\", rows=2, fl=\"id\""
-        + "), id=test, runInterval=1000, terminate=true, queueSize=50)");
-    daemonStream = (DaemonStream)factory.constructStream(expression);
-    daemonStream.setStreamContext(context);
-
-    List<Tuple> tuples = getTuples(daemonStream);
-    assertTrue(tuples.size() == 10);
-    cache.close();
   }
 
-
   @Test
-  public void testRollupStream() throws Exception {
+  public void testSubFacetStream() throws Exception {
 
     new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
-        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
-        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
-        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
-        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
-        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
-        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
+        .add(id, "0", "level1_s", "hello0", "level2_s", "a", "a_i", "0", "a_f", "1")
+        .add(id, "2", "level1_s", "hello0", "level2_s", "a", "a_i", "2", "a_f", "2")
+        .add(id, "3", "level1_s", "hello3", "level2_s", "a", "a_i", "3", "a_f", "3")
+        .add(id, "4", "level1_s", "hello4", "level2_s", "a", "a_i", "4", "a_f", "4")
+        .add(id, "1", "level1_s", "hello0", "level2_s", "b", "a_i", "1", "a_f", "5")
+        .add(id, "5", "level1_s", "hello3", "level2_s", "b", "a_i", "10", "a_f", "6")
+        .add(id, "6", "level1_s", "hello4", "level2_s", "b", "a_i", "11", "a_f", "7")
+        .add(id, "7", "level1_s", "hello3", "level2_s", "b", "a_i", "12", "a_f", "8")
+        .add(id, "8", "level1_s", "hello3", "level2_s", "b", "a_i", "13", "a_f", "9")
+        .add(id, "9", "level1_s", "hello0", "level2_s", "b", "a_i", "14", "a_f", "10")
         .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
 
+    String clause;
+    TupleStream stream;
+    List<Tuple> tuples;
+    
     StreamFactory factory = new StreamFactory()
-      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-      .withFunctionName("search", CloudSolrStream.class)
-      .withFunctionName("rollup", RollupStream.class)
+      .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress())
+      .withFunctionName("facet", FacetStream.class)
       .withFunctionName("sum", SumMetric.class)
       .withFunctionName("min", MinMetric.class)
       .withFunctionName("max", MaxMetric.class)
       .withFunctionName("avg", MeanMetric.class)
-      .withFunctionName("count", CountMetric.class);     
+      .withFunctionName("count", CountMetric.class);
     
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-    try {
-      expression = StreamExpressionParser.parse("rollup("
-          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"a_s,a_i,a_f\", sort=\"a_s asc\"),"
-          + "over=\"a_s\","
-          + "sum(a_i),"
-          + "sum(a_f),"
-          + "min(a_i),"
-          + "min(a_f),"
-          + "max(a_i),"
-          + "max(a_f),"
-          + "avg(a_i),"
-          + "avg(a_f),"
-          + "count(*),"
-          + ")");
-      stream = factory.constructStream(expression);
-      stream.setStreamContext(streamContext);
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 3);
+    // Basic test
+    clause = "facet("
+              +   "collection1, "
+              +   "q=\"*:*\", "
+              +   "buckets=\"level1_s, level2_s\", "
+              +   "bucketSorts=\"sum(a_i) desc, sum(a_i) desc)\", "
+              +   "bucketSizeLimit=100, "
+              +   "sum(a_i), count(*)"
+              + ")";
+    
+    stream = factory.constructStream(clause);
+    tuples = getTuples(stream);
 
-      //Test Long and Double Sums
+    assert(tuples.size() == 6);
 
-      Tuple tuple = tuples.get(0);
-      String bucket = tuple.getString("a_s");
-      Double sumi = tuple.getDouble("sum(a_i)");
-      Double sumf = tuple.getDouble("sum(a_f)");
-      Double mini = tuple.getDouble("min(a_i)");
-      Double minf = tuple.getDouble("min(a_f)");
-      Double maxi = tuple.getDouble("max(a_i)");
-      Double maxf = tuple.getDouble("max(a_f)");
-      Double avgi = tuple.getDouble("avg(a_i)");
-      Double avgf = tuple.getDouble("avg(a_f)");
-      Double count = tuple.getDouble("count(*)");
+    Tuple tuple = tuples.get(0);
+    String bucket1 = tuple.getString("level1_s");
+    String bucket2 = tuple.getString("level2_s");
+    Double sumi = tuple.getDouble("sum(a_i)");
+    Double count = tuple.getDouble("count(*)");
 
-      assertTrue(bucket.equals("hello0"));
-      assertTrue(sumi.doubleValue() == 17.0D);
-      assertTrue(sumf.doubleValue() == 18.0D);
-      assertTrue(mini.doubleValue() == 0.0D);
-      assertTrue(minf.doubleValue() == 1.0D);
-      assertTrue(maxi.doubleValue() == 14.0D);
-      assertTrue(maxf.doubleValue() == 10.0D);
-      assertTrue(avgi.doubleValue() == 4.25D);
-      assertTrue(avgf.doubleValue() == 4.5D);
-      assertTrue(count.doubleValue() == 4);
+    assertTrue(bucket1.equals("hello3"));
+    assertTrue(bucket2.equals("b"));
+    assertTrue(sumi.longValue() == 35);
+    assertTrue(count.doubleValue() == 3);
 
-      tuple = tuples.get(1);
-      bucket = tuple.getString("a_s");
-      sumi = tuple.getDouble("sum(a_i)");
-      sumf = tuple.getDouble("sum(a_f)");
-      mini = tuple.getDouble("min(a_i)");
-      minf = tuple.getDouble("min(a_f)");
-      maxi = tuple.getDouble("max(a_i)");
-      maxf = tuple.getDouble("max(a_f)");
-      avgi = tuple.getDouble("avg(a_i)");
-      avgf = tuple.getDouble("avg(a_f)");
-      count = tuple.getDouble("count(*)");
+    tuple = tuples.get(1);
+    bucket1 = tuple.getString("level1_s");
+    bucket2 = tuple.getString("level2_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    count = tuple.getDouble("count(*)");
 
-      assertTrue(bucket.equals("hello3"));
-      assertTrue(sumi.doubleValue() == 38.0D);
-      assertTrue(sumf.doubleValue() == 26.0D);
-      assertTrue(mini.doubleValue() == 3.0D);
-      assertTrue(minf.doubleValue() == 3.0D);
-      assertTrue(maxi.doubleValue() == 13.0D);
-      assertTrue(maxf.doubleValue() == 9.0D);
-      assertTrue(avgi.doubleValue() == 9.5D);
-      assertTrue(avgf.doubleValue() == 6.5D);
-      assertTrue(count.doubleValue() == 4);
-
-      tuple = tuples.get(2);
-      bucket = tuple.getString("a_s");
-      sumi = tuple.getDouble("sum(a_i)");
-      sumf = tuple.getDouble("sum(a_f)");
-      mini = tuple.getDouble("min(a_i)");
-      minf = tuple.getDouble("min(a_f)");
-      maxi = tuple.getDouble("max(a_i)");
-      maxf = tuple.getDouble("max(a_f)");
-      avgi = tuple.getDouble("avg(a_i)");
-      avgf = tuple.getDouble("avg(a_f)");
-      count = tuple.getDouble("count(*)");
+    assertTrue(bucket1.equals("hello0"));
+    assertTrue(bucket2.equals("b"));
+    assertTrue(sumi.longValue() == 15);
+    assertTrue(count.doubleValue() == 2);
 
-      assertTrue(bucket.equals("hello4"));
-      assertTrue(sumi.longValue() == 15);
-      assertTrue(sumf.doubleValue() == 11.0D);
-      assertTrue(mini.doubleValue() == 4.0D);
-      assertTrue(minf.doubleValue() == 4.0D);
-      assertTrue(maxi.doubleValue() == 11.0D);
-      assertTrue(maxf.doubleValue() == 7.0D);
-      assertTrue(avgi.doubleValue() == 7.5D);
-      assertTrue(avgf.doubleValue() == 5.5D);
-      assertTrue(count.doubleValue() == 2);
+    tuple = tuples.get(2);
+    bucket1 = tuple.getString("level1_s");
+    bucket2 = tuple.getString("level2_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    count = tuple.getDouble("count(*)");
 
-    } finally {
-      solrClientCache.close();
-    }
-  }
+    assertTrue(bucket1.equals("hello4"));
+    assertTrue(bucket2.equals("b"));
+    assertTrue(sumi.longValue() == 11);
+    assertTrue(count.doubleValue() == 1);
 
-  @Test
-  public void testStatsStream() throws Exception {
+    tuple = tuples.get(3);
+    bucket1 = tuple.getString("level1_s");
+    bucket2 = tuple.getString("level2_s");
+    sumi = tuple.getDouble("sum(a_i)");
+    count = tuple.getDouble("count(*)");
 
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
-        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
-        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
-        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
-        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
-        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
-        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamFactory factory = new StreamFactory()
-    .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
-    .withFunctionName("stats", StatsStream.class)
-    .withFunctionName("sum", SumMetric.class)
-    .withFunctionName("min", MinMetric.class)
-    .withFunctionName("max", MaxMetric.class)
-    .withFunctionName("avg", MeanMetric.class)
-    .withFunctionName("count", CountMetric.class);     
-  
-    StreamExpression expression;
-    TupleStream stream;
-    List<Tuple> tuples;
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache cache = new SolrClientCache();
-    try {
-      streamContext.setSolrClientCache(cache);
-      String expr = "stats(" + COLLECTIONORALIAS + ", q=*:*, sum(a_i), sum(a_f), min(a_i), min(a_f), max(a_i), max(a_f), avg(a_i), avg(a_f), count(*))";
-      expression = StreamExpressionParser.parse(expr);
-      stream = factory.constructStream(expression);
-      stream.setStreamContext(streamContext);
-
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 1);
-
-      //Test Long and Double Sums
-
-      Tuple tuple = tuples.get(0);
-
-      Double sumi = tuple.getDouble("sum(a_i)");
-      Double sumf = tuple.getDouble("sum(a_f)");
-      Double mini = tuple.getDouble("min(a_i)");
-      Double minf = tuple.getDouble("min(a_f)");
-      Double maxi = tuple.getDouble("max(a_i)");
-      Double maxf = tuple.getDouble("max(a_f)");
-      Double avgi = tuple.getDouble("avg(a_i)");
-      Double avgf = tuple.getDouble("avg(a_f)");
-      Double count = tuple.getDouble("count(*)");
-
-      assertTrue(sumi.longValue() == 70);
-      assertTrue(sumf.doubleValue() == 55.0D);
-      assertTrue(mini.doubleValue() == 0.0D);
-      assertTrue(minf.doubleValue() == 1.0D);
-      assertTrue(maxi.doubleValue() == 14.0D);
-      assertTrue(maxf.doubleValue() == 10.0D);
-      assertTrue(avgi.doubleValue() == 7.0D);
-      assertTrue(avgf.doubleValue() == 5.5D);
-      assertTrue(count.doubleValue() == 10);
-
-
-      //Test with shards parameter
-      List<String> shardUrls = TupleStream.getShards(cluster.getZkServer().getZkAddress(), COLLECTIONORALIAS, streamContext);
-      expr = "stats(myCollection, q=*:*, sum(a_i), sum(a_f), min(a_i), min(a_f), max(a_i), max(a_f), avg(a_i), avg(a_f), count(*))";
-      Map<String, List<String>> shardsMap = new HashMap();
-      shardsMap.put("myCollection", shardUrls);
-      StreamContext context = new StreamContext();
-      context.put("shards", shardsMap);
-      context.setSolrClientCache(cache);
-      stream = factory.constructStream(expr);
-      stream.setStreamContext(context);
-
-      tuples = getTuples(stream);
-
-      assert (tuples.size() == 1);
-
-      //Test Long and Double Sums
-
-      tuple = tuples.get(0);
-
-      sumi = tuple.getDouble("sum(a_i)");
-      sumf = tuple.getDouble("sum(a_f)");
-      mini = tuple.getDouble("min(a_i)");
-      minf = tuple.getDouble("min(a_f)");
-      maxi = tuple.getDouble("max(a_i)");
-      maxf = tuple.getDouble("max(a_f)");
-      avgi = tuple.getDouble("avg(a_i)");
-      avgf = tuple.getDouble("avg(a_f)");
-      count = tuple.getDouble("count(*)");
-
-      assertTrue(sumi.longValue() == 70);
-      assertTrue(sumf.doubleValue() == 55.0D);
-      assertTrue(mini.doubleValue() == 0.0D);
-      assertTrue(minf.doubleValue() == 1.0D);
-      assertTrue(maxi.doubleValue() == 14.0D);
-      assertTrue(maxf.doubleValue() == 10.0D);
-      assertTrue(avgi.doubleValue() == 7.0D);
-      assertTrue(avgf.doubleValue() == 5.5D);
-      assertTrue(count.doubleValue() == 10);
-
-      //Exercise the /stream handler
-
-      //Add the shards http parameter for myCollection
-      StringBuilder buf = new StringBuilder();
-      for (String shardUrl : shardUrls) {
-        if (buf.length() > 0) {
-          buf.append(",");
-        }
-        buf.append(shardUrl);
-      }
-
-      ModifiableSolrParams solrParams = new ModifiableSolrParams();
-      solrParams.add("qt", "/stream");
-      solrParams.add("expr", expr);
-      solrParams.add("myCollection.shards", buf.toString());
-      SolrStream solrStream = new SolrStream(shardUrls.get(0), solrParams);
-      tuples = getTuples(solrStream);
-      assert (tuples.size() == 1);
-
-      tuple =tuples.get(0);
-
-      sumi = tuple.getDouble("sum(a_i)");
-      sumf = tuple.getDouble("sum(a_f)");
-      mini = tuple.getDouble("min(a_i)");
-      minf = tuple.getDouble("min(a_f)");
-      maxi = tuple.getDouble("max(a_i)");
-      maxf = tuple.getDouble("max(a_f)");
-      avgi = tuple.getDouble("avg(a_i)");
-      avgf = tuple.getDouble("avg(a_f)");
-      count = tuple.getDouble("count(*)");
-
-      assertTrue(sumi.longValue() == 70);
-      assertTrue(sumf.doubleValue() == 55.0D);
-      assertTrue(mini.doubleValue() == 0.0D);
-      assertTrue(minf.doubleValue() == 1.0D);
-      assertTrue(maxi.doubleValue() == 14.0D);
-      assertTrue(maxf.doubleValue() == 10.0D);
-      assertTrue(avgi.doubleValue() == 7.0D);
-      assertTrue(avgf.doubleValue() == 5.5D);
-      assertTrue(count.doubleValue() == 10);
-      //Add a negative test to prove that it cannot find slices if the shards parameter is removed
-
-      try {
-        ModifiableSolrParams solrParamsBad = new ModifiableSolrParams();
-        solrParamsBad.add("qt", "/stream");
-        solrParamsBad.add("expr", expr);
-        solrStream = new SolrStream(shardUrls.get(0), solrParamsBad);
-        tuples = getTuples(solrStream);
-        throw new Exception("Exception should have been thrown above");
-      } catch (IOException e) {
-        assertTrue(e.getMessage().contains("Collection not found: myCollection"));
-      }
-    } finally {
-      cache.close();
-    }
-  }
-
-  @Test
-  public void testParallelUniqueStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .add(id, "5", "a_s", "hello1", "a_i", "10", "a_f", "1")
-        .add(id, "6", "a_s", "hello1", "a_i", "11", "a_f", "5")
-        .add(id, "7", "a_s", "hello1", "a_i", "12", "a_f", "5")
-        .add(id, "8", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    String zkHost = cluster.getZkServer().getZkAddress();
-    StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost)
-        .withFunctionName("search", CloudSolrStream.class)
-        .withFunctionName("unique", UniqueStream.class)
-        .withFunctionName("top", RankStream.class)
-        .withFunctionName("group", ReducerStream.class)
-        .withFunctionName("parallel", ParallelStream.class);
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-
-
-
-    try {
-
-      ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")");
-      pstream.setStreamContext(streamContext);
-      List<Tuple> tuples = getTuples(pstream);
-      assert (tuples.size() == 5);
-      assertOrder(tuples, 0, 1, 3, 4, 6);
-
-      //Test the eofTuples
-
-      Map<String, Tuple> eofTuples = pstream.getEofTuples();
-      assert (eofTuples.size() == 2); //There should be an EOF tuple for each worker.
-    } finally {
-      solrClientCache.close();
-    }
-  }
-
-  @Test
-  public void testParallelShuffleStream() throws Exception {
-
-    new UpdateRequest()
-        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
-        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
-        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
-        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
-        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
-        .add(id, "5", "a_s", "hello1", "a_i", "10", "a_f", "1")
-        .add(id, "6", "a_s", "hello1", "a_i", "11", "a_f", "5")
-        .add(id, "7", "a_s", "hello1", "a_i", "12", "a_f", "5")
-        .add(id, "8", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "9", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "10", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "11", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "12", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "13", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "14", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "15", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "16", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "17", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "18", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "19", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "20", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "21", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "22", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "23", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "24", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "25", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "26", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "27", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "28", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "29", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "30", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "31", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "32", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "33", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "34", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "35", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "36", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "37", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "38", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "39", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "40", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "41", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "42", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "43", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "44", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "45", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "46", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "47", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "48", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "49", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "50", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "51", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "52", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "53", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "54", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "55", "a_s", "hello1", "a_i", "13", "a_f", "4")
-        .add(id, "56", "a_s", "hello1", "a_i", "13", "a_f", "1000")
-
-        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
-
-    StreamContext streamContext = new StreamContext();
-    SolrClientCache solrClientCache = new SolrClientCache();
-    streamContext.setSolrClientCache(solrClientCache);
-
-    String zkHost = cluster.getZkServer().getZkAddress();
-    StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost)
-        .withFunctionName("shuffle", ShuffleStream.class)
-        .withFunctionName("unique", UniqueStream.class)
-        .withFunctionName("parallel", ParallelStream.class);
-
-    try {
-      ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(shuffle(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")");
-      pstream.setStreamFactory(streamFactory);
-      pstream.setStreamContext(streamContext);
-     

<TRUNCATED>

[09/50] lucene-solr:jira/solr-12181: Moved system.out from the forked subprocess to the test because it uses inherited i/o handles directly and shows as unexpected JVM output.

Posted by ab...@apache.org.
Moved system.out from the forked subprocess to the test because it uses inherited i/o handles directly and shows as unexpected JVM output.
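
For context, a minimal sketch (assumed, not taken from the patch) of why the child's
output bypasses the test runner's capture: with inheritIO() the forked JVM writes
straight to the parent's real stdout/stderr handles.

    import java.nio.file.Paths;

    public class InheritedIoDemo {
      public static void main(String[] args) throws Exception {
        // Parent-side print: the test framework can capture this normally.
        System.out.println("forking child JVM");
        Process p = new ProcessBuilder(
                Paths.get(System.getProperty("java.home"), "bin", "java").toString(),
                "-version")
            .inheritIO() // child output goes to the inherited handles, so it
                         // surfaces as raw, "unexpected" JVM output
            .start();
        p.waitFor();
      }
    }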


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/469979dc
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/469979dc
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/469979dc

Branch: refs/heads/jira/solr-12181
Commit: 469979dc25ac999069267ef4f542fa9bb95bd3d6
Parents: ef902f9
Author: Dawid Weiss <da...@carrotsearch.com>
Authored: Wed Apr 4 21:54:27 2018 +0200
Committer: Dawid Weiss <da...@carrotsearch.com>
Committed: Wed Apr 4 21:54:27 2018 +0200

----------------------------------------------------------------------
 .../apache/lucene/codecs/TestCodecLoadingDeadlock.java    | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/469979dc/lucene/core/src/test/org/apache/lucene/codecs/TestCodecLoadingDeadlock.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/TestCodecLoadingDeadlock.java b/lucene/core/src/test/org/apache/lucene/codecs/TestCodecLoadingDeadlock.java
index 8137f23..8080c87 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/TestCodecLoadingDeadlock.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/TestCodecLoadingDeadlock.java
@@ -61,8 +61,11 @@ public class TestCodecLoadingDeadlock extends Assert {
         .get(rnd.nextInt(avail.size()));
     final String dvfName = new ArrayList<>(avail = DocValuesFormat.availableDocValuesFormats())
         .get(rnd.nextInt(avail.size()));
-    
-    // spawn separate JVM:
+
+    System.out.println(String.format(Locale.ROOT,
+        "codec: %s, pf: %s, dvf: %s", codecName, pfName, dvfName));
+
+    // Fork a separate JVM to reinitialize classes.
     final Process p = new ProcessBuilder(
       Paths.get(System.getProperty("java.home"), "bin", "java").toString(),
       "-cp",
@@ -86,9 +89,6 @@ public class TestCodecLoadingDeadlock extends Assert {
     final String pfName = args[1];
     final String dvfName = args[2];
 
-    System.out.println(String.format(Locale.ROOT,
-        "codec: %s, pf: %s, dvf: %s", codecName, pfName, dvfName));
-
     final int numThreads = 14; // two times the modulo in switch statement below
     final CopyOnWriteArrayList<Thread> allThreads = new CopyOnWriteArrayList<>();
     final ExecutorService pool = Executors.newFixedThreadPool(numThreads, new NamedThreadFactory("deadlockchecker") {


[37/50] lucene-solr:jira/solr-12181: SOLR-12147: Don't use MemoryPostingsFormat for TestDocTermOrds.testTriggerUnInvertLimit

Posted by ab...@apache.org.
SOLR-12147: Don't use MemoryPostingsFormat for TestDocTermOrds.testTriggerUnInvertLimit

This can lead to OOM on nightly runs, as it needs to create a very large index,
and the CI machines don't have huge amounts of RAM.
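
A small illustrative guard of the same shape (a sketch, not the full test; the
patch below checks the name of Codec.getDefault().postingsFormat() exactly like this):

    import org.apache.lucene.codecs.Codec;
    import static org.junit.Assume.assumeFalse;

    public class GuardSketch {
      // Skip (rather than fail) when the default postings format would buffer
      // the whole index in RAM, as the patch does for the "Memory" format.
      public void testHugeIndex() {
        String pfName = Codec.getDefault().postingsFormat().getName();
        assumeFalse("Don't run this massive test with MemoryPostingsFormat, as it can OOM",
            "Memory".equals(pfName));
        // ... build the very large index ...
      }
    }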


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2ae488aa
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2ae488aa
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2ae488aa

Branch: refs/heads/jira/solr-12181
Commit: 2ae488aae2a0601148dcd1b6aa794489a0572349
Parents: 005da87
Author: Alan Woodward <ro...@apache.org>
Authored: Sat Apr 7 18:34:22 2018 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Apr 7 18:34:25 2018 +0100

----------------------------------------------------------------------
 .../org/apache/solr/uninverting/TestDocTermOrds.java    | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ae488aa/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
index c2d9e17..21f44ea 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Objects;
 import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -42,17 +43,17 @@ import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.solr.legacy.LegacyIntField;
-import org.apache.solr.legacy.LegacyLongField;
-import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.index.SlowCompositeReaderWrapper;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyNumericUtils;
 
 // TODO:
 //   - test w/ del docs
@@ -145,6 +146,9 @@ public class TestDocTermOrds extends LuceneTestCase {
   public void testTriggerUnInvertLimit() throws IOException {
     final boolean SHOULD_TRIGGER = false; // Set this to true to use the test with the old implementation
 
+    assumeFalse("Don't run this massive test with MemoryPostingsFormat, as it can OOM",
+        Objects.equals(Codec.getDefault().postingsFormat().getName(), "Memory"));
+
     // Ensure enough terms inside of a single UnInvert-pass-structure to trigger the limit
     final int REF_LIMIT = (int) Math.pow(2, 24); // Maximum number of references within a single pass-structure
     final int DOCS = (1<<16)-1;                  // The number of documents within a single pass (simplified)


[34/50] lucene-solr:jira/solr-12181: SOLR-12199: TestReplicationHandler.doTestRepeater(): TEST_PORT interpolation failure: Server refused connection at: http://127.0.0.1:TEST_PORT/solr

Posted by ab...@apache.org.
SOLR-12199: TestReplicationHandler.doTestRepeater(): TEST_PORT interpolation failure: Server refused connection at: http://127.0.0.1:TEST_PORT/solr
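
The fix (visible in the diff below) sets the slave's test port before each config
copy. A hypothetical sketch of the interpolation step that otherwise leaves the
literal token in the URL; interpolate() and its behavior are assumptions for
illustration, not the real test API:

    // Hypothetical helper: the config copy is assumed to substitute the
    // TEST_PORT token using the port configured on the instance.
    static String interpolate(String template, Integer testPort) {
      if (testPort == null) {
        return template; // token survives: "http://127.0.0.1:TEST_PORT/solr"
      }
      return template.replace("TEST_PORT", Integer.toString(testPort));
    }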


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/5c37b07a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/5c37b07a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/5c37b07a

Branch: refs/heads/jira/solr-12181
Commit: 5c37b07a3d53e64c2f0cebd33eb7024d693d62f5
Parents: abaf378
Author: Steve Rowe <sa...@apache.org>
Authored: Fri Apr 6 14:50:21 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Fri Apr 6 14:50:21 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                             | 3 +++
 .../test/org/apache/solr/handler/TestReplicationHandler.java | 8 ++++++--
 2 files changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5c37b07a/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 09c330b..c7270da 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -111,6 +111,9 @@ Bug Fixes
   
 * SOLR-11929: UpdateLog metrics are not initialized on core reload.  (ab, Steve Rowe) 
 
+* SOLR-12199: TestReplicationHandler.doTestRepeater(): TEST_PORT interpolation failure: 
+  Server refused connection at: http://127.0.0.1:TEST_PORT/solr  (Mikhail Khludnev, Dawid Weiss, Steve Rowe)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5c37b07a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
index e8caf99..e4b7fa3 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
@@ -294,6 +294,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
   public void doTestDetails() throws Exception {
     slaveJetty.stop();
     
+    slave.setTestPort(masterJetty.getLocalPort());
     slave.copyConfigFile(CONF_DIR + "solrconfig-slave.xml", "solrconfig.xml");
     slaveJetty = createJetty(slave);
     
@@ -706,6 +707,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
   public void doTestIndexFetchWithMasterUrl() throws Exception {
     //change solrconfig on slave
     //this has no entry for pollinginterval
+    slave.setTestPort(masterJetty.getLocalPort());
     slave.copyConfigFile(CONF_DIR + "solrconfig-slave1.xml", "solrconfig.xml");
     slaveJetty.stop();
     slaveJetty = createJetty(slave);
@@ -840,7 +842,8 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
     String slaveSchema = SLAVE_SCHEMA_1;
 
     try {
-      
+
+      slave.setTestPort(masterJetty.getLocalPort());
       slave.copyConfigFile(CONF_DIR +"solrconfig-slave1.xml", "solrconfig.xml");
       slave.copyConfigFile(CONF_DIR +slaveSchema, "schema.xml");
       slaveJetty.stop();
@@ -986,6 +989,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
   @Test
   public void doTestRepeater() throws Exception {
     // no polling
+    slave.setTestPort(masterJetty.getLocalPort());
     slave.copyConfigFile(CONF_DIR + "solrconfig-slave1.xml", "solrconfig.xml");
     slaveJetty.stop();
     slaveJetty = createJetty(slave);
@@ -993,7 +997,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
     slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
 
     try {
-      repeater = new SolrInstance(createTempDir("solr-instance").toFile(), "repeater", null);
+      repeater = new SolrInstance(createTempDir("solr-instance").toFile(), "repeater", masterJetty.getLocalPort());
       repeater.setUp();
       repeater.copyConfigFile(CONF_DIR + "solrconfig-repeater.xml",
           "solrconfig.xml");


[46/50] lucene-solr:jira/solr-12181: LUCENE-8237: Add missing CHANGES.TXT entry

Posted by ab...@apache.org.
LUCENE-8237: Add missing CHANGES.TXT entry


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a7a3c0a2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a7a3c0a2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a7a3c0a2

Branch: refs/heads/jira/solr-12181
Commit: a7a3c0a282978074b03d8855774ba0287f6ba810
Parents: ed62b99
Author: Simon Willnauer <si...@apache.org>
Authored: Mon Apr 9 12:09:32 2018 +0200
Committer: Simon Willnauer <si...@apache.org>
Committed: Mon Apr 9 12:09:32 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a7a3c0a2/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 74efacc..777ec07 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -121,6 +121,10 @@ New Features
   to selectively carry over soft_deleted document across merges for retention
   policies (Simon Willnauer, Mike McCandless, Robert Muir)
 
+* LUCENE-8237: Add a SoftDeletesDirectoryReaderWrapper that allows respecting
+  soft deletes if the reader is opened from a directory. (Simon Willnauer,
+  Mike McCandless, Uwe Schindler, Adrien Grand)
+
 Bug Fixes
 
 * LUCENE-8234: Fixed bug in how spatial relationship is computed for


[45/50] lucene-solr:jira/solr-12181: LUCENE-8237: Add a SoftDeletesDirectoryReaderWrapper

Posted by ab...@apache.org.
LUCENE-8237: Add a SoftDeletesDirectoryReaderWrapper

This adds support for soft deletes if the reader is opened from a directory.
Today we only support soft deletes for NRT readers; this change allows wrapping
an existing DirectoryReader with a SoftDeletesDirectoryReaderWrapper to also
filter out soft deletes in the case of a non-NRT reader.
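
A minimal usage sketch (assumed, based on the constructor added in this commit;
the index path and soft-deletes field name are illustrative stand-ins):

    import java.nio.file.Paths;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    Directory dir = FSDirectory.open(Paths.get("/path/to/index")); // illustrative path
    // Hide documents that have a value in the configured soft-deletes field
    // (see IndexWriterConfig#setSoftDeletesField); "__soft_deletes" is a stand-in.
    DirectoryReader reader = new SoftDeletesDirectoryReaderWrapper(
        DirectoryReader.open(dir), "__soft_deletes");
    // ... search against 'reader' as usual ...
    reader.close();
    dir.close();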

Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ed62b990
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ed62b990
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ed62b990

Branch: refs/heads/jira/solr-12181
Commit: ed62b990d8bfa61eefd918cfdfc82e5848afa5dd
Parents: b82f591
Author: Simon Willnauer <si...@apache.org>
Authored: Mon Apr 9 11:50:38 2018 +0200
Committer: GitHub <no...@github.com>
Committed: Mon Apr 9 11:50:38 2018 +0200

----------------------------------------------------------------------
 .../apache/lucene/index/PendingSoftDeletes.java |  23 ++-
 .../SoftDeletesDirectoryReaderWrapper.java      | 177 +++++++++++++++++
 .../index/SoftDeletesRetentionMergePolicy.java  |  33 +--
 .../lucene/index/StandardDirectoryReader.java   |   2 +-
 .../lucene/index/TestDirectoryReaderReopen.java |  54 +++++
 .../TestSoftDeletesDirectoryReaderWrapper.java  | 199 +++++++++++++++++++
 .../org/apache/lucene/index/TestStressNRT.java  |   8 +-
 .../lucene/search/TestSearcherManager.java      |   2 +-
 .../apache/lucene/index/RandomIndexWriter.java  |   9 +-
 9 files changed, 480 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java b/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
index 1f6c2ef..b73ac83 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
@@ -73,7 +73,7 @@ final class PendingSoftDeletes extends PendingDeletes {
         this.pendingDeleteCount = 0;
       } else {
         assert info.info.maxDoc() > 0 : "maxDoc is 0";
-        applyUpdates(iterator);
+        pendingDeleteCount += applySoftDeletes(iterator, getMutableBits());
       }
       dvGeneration = info.getDocValuesGen();
     }
@@ -94,19 +94,26 @@ final class PendingSoftDeletes extends PendingDeletes {
     hardDeletes.reset();
   }
 
-  private void applyUpdates(DocIdSetIterator iterator) throws IOException {
-    final MutableBits mutableBits = getMutableBits();
+  /**
+   * Clears each bit in the given bitset that is set and whose doc ID is returned by the given DocIdSetIterator.
+   *
+   * @param iterator the doc ID set iterator of deletes to apply
+   * @param bits the bit set to apply the deletes to
+   * @return the number of bits changed by this function
+   */
+  static int applySoftDeletes(DocIdSetIterator iterator, MutableBits bits) throws IOException {
+    assert iterator != null;
     int newDeletes = 0;
     int docID;
     while ((docID = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-      if (mutableBits.get(docID)) { // doc is live - clear it
-        mutableBits.clear(docID);
+      if (bits.get(docID)) { // doc is live - clear it
+        bits.clear(docID);
         newDeletes++;
         // now that we know we deleted it and we fully control the hard deletes we can do correct accounting
         // below.
       }
     }
-    pendingDeleteCount += newDeletes;
+    return newDeletes;
   }
 
   @Override
@@ -118,7 +125,7 @@ final class PendingSoftDeletes extends PendingDeletes {
         subs[i] = updatesToApply.get(i).iterator();
       }
       DocValuesFieldUpdates.Iterator iterator = DocValuesFieldUpdates.mergedIterator(subs);
-      applyUpdates(new DocIdSetIterator() {
+      pendingDeleteCount += applySoftDeletes(new DocIdSetIterator() {
         int docID = -1;
         @Override
         public int docID() {
@@ -139,7 +146,7 @@ final class PendingSoftDeletes extends PendingDeletes {
         public long cost() {
           throw new UnsupportedOperationException();
         }
-      });
+      }, getMutableBits());
       dvGeneration = info.getDocValuesGen();
     }
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/java/org/apache/lucene/index/SoftDeletesDirectoryReaderWrapper.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SoftDeletesDirectoryReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesDirectoryReaderWrapper.java
new file mode 100644
index 0000000..36568f6
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesDirectoryReaderWrapper.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DocValuesFieldExistsQuery;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+
+/**
+ * This reader filters out documents that have a doc values value in the given field and treats these
+ * documents as soft deleted. Hard deleted documents will also be filtered out in the live docs of this reader.
+ * @see IndexWriterConfig#setSoftDeletesField(String)
+ * @see IndexWriter#softUpdateDocument(Term, Iterable, Field...)
+ * @see SoftDeletesRetentionMergePolicy
+ */
+public final class SoftDeletesDirectoryReaderWrapper extends FilterDirectoryReader {
+  private final String field;
+  private final CacheHelper readerCacheHelper;
+  /**
+   * Creates a new soft deletes wrapper.
+   * @param in the incoming directory reader
+   * @param field the soft deletes field
+   */
+  public SoftDeletesDirectoryReaderWrapper(DirectoryReader in, String field) throws IOException {
+    this(in, new SoftDeletesSubReaderWrapper(Collections.emptyMap(), field));
+  }
+
+  private SoftDeletesDirectoryReaderWrapper(DirectoryReader in, SoftDeletesSubReaderWrapper wrapper) throws IOException {
+    super(in, wrapper);
+    this.field = wrapper.field;
+    readerCacheHelper = in.getReaderCacheHelper() == null ? null : new DelegatingCacheHelper(in.getReaderCacheHelper());
+  }
+
+  @Override
+  protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
+    Map<CacheKey, LeafReader> readerCache = new HashMap<>();
+    for (LeafReader reader : getSequentialSubReaders()) {
+      // we try to reuse the live docs instances here if the reader cache key didn't change
+      if (reader instanceof SoftDeletesFilterLeafReader && reader.getReaderCacheHelper() != null) {
+        readerCache.put(((SoftDeletesFilterLeafReader) reader).reader.getReaderCacheHelper().getKey(), reader);
+      }
+
+    }
+    return new SoftDeletesDirectoryReaderWrapper(in, new SoftDeletesSubReaderWrapper(readerCache, field));
+  }
+
+  @Override
+  public CacheHelper getReaderCacheHelper() {
+    return readerCacheHelper;
+  }
+
+  private static class SoftDeletesSubReaderWrapper extends SubReaderWrapper {
+    private final Map<CacheKey, LeafReader> mapping;
+    private final String field;
+
+    public SoftDeletesSubReaderWrapper(Map<CacheKey, LeafReader> oldReadersCache, String field) {
+      Objects.requireNonNull(field, "Field must not be null");
+      assert oldReadersCache != null;
+      this.mapping = oldReadersCache;
+      this.field = field;
+    }
+
+    @Override
+    public LeafReader wrap(LeafReader reader) {
+      CacheHelper readerCacheHelper = reader.getReaderCacheHelper();
+      if (readerCacheHelper != null && mapping.containsKey(readerCacheHelper.getKey())) {
+        // if the reader cache helper didn't change and we have it in the cache don't bother creating a new one
+        return mapping.get(readerCacheHelper.getKey());
+      }
+      try {
+        return SoftDeletesDirectoryReaderWrapper.wrap(reader, field);
+      } catch (IOException e) {
+        throw new UncheckedIOException(e);
+      }
+    }
+  }
+
+  static LeafReader wrap(LeafReader reader, String field) throws IOException {
+      DocIdSetIterator iterator = DocValuesFieldExistsQuery.getDocValuesDocIdSetIterator(field, reader);
+      if (iterator == null) {
+        return reader;
+      }
+      Bits liveDocs = reader.getLiveDocs();
+      final FixedBitSet bits;
+      if (liveDocs != null) {
+        bits = SoftDeletesRetentionMergePolicy.cloneLiveDocs(liveDocs);
+      } else {
+        bits = new FixedBitSet(reader.maxDoc());
+        bits.set(0, reader.maxDoc());
+      }
+      int numDeletes = reader.numDeletedDocs() + PendingSoftDeletes.applySoftDeletes(iterator, bits);
+      int numDocs = reader.maxDoc() - numDeletes;
+      return new SoftDeletesFilterLeafReader(reader, bits, numDocs);
+  }
+
+  static final class SoftDeletesFilterLeafReader extends FilterLeafReader {
+    private final LeafReader reader;
+    private final FixedBitSet bits;
+    private final int numDocs;
+    private final CacheHelper readerCacheHelper;
+
+    private SoftDeletesFilterLeafReader(LeafReader reader, FixedBitSet bits, int numDocs) {
+      super(reader);
+      this.reader = reader;
+      this.bits = bits;
+      this.numDocs = numDocs;
+      this.readerCacheHelper = reader.getReaderCacheHelper() == null ? null :
+          new DelegatingCacheHelper(reader.getReaderCacheHelper());
+    }
+
+    @Override
+    public Bits getLiveDocs() {
+      return bits;
+    }
+
+    @Override
+    public int numDocs() {
+      return numDocs;
+    }
+
+    @Override
+    public CacheHelper getCoreCacheHelper() {
+      return reader.getCoreCacheHelper();
+    }
+
+    @Override
+    public CacheHelper getReaderCacheHelper() {
+      return readerCacheHelper;
+    }
+  }
+
+  private static class DelegatingCacheHelper implements CacheHelper {
+    private final CacheHelper delegate;
+    private final CacheKey cacheKey = new CacheKey();
+
+    public DelegatingCacheHelper(CacheHelper delegate) {
+      this.delegate = delegate;
+    }
+
+    @Override
+    public CacheKey getKey() {
+      return cacheKey;
+    }
+
+    @Override
+    public void addClosedListener(ClosedListener listener) {
+      // here we wrap the listener and call it with our cache key
+      // this is important since this key will be used to cache the reader and otherwise we won't free caches etc.
+      delegate.addClosedListener(unused -> listener.onClose(cacheKey));
+    }
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
index debe7d7..b088755 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
@@ -99,32 +99,39 @@ public final class SoftDeletesRetentionMergePolicy extends OneMergeWrappingMerge
     }, reader.maxDoc() - reader.numDocs());
     Scorer scorer = getScorer(softDeleteField, retentionQuery, wrappedReader);
     if (scorer != null) {
-      FixedBitSet mutableBits;
-      if (liveDocs instanceof FixedBitSet) {
-        mutableBits = ((FixedBitSet) liveDocs).clone();
-      } else { // mainly if we have asserting codec
-        mutableBits = new FixedBitSet(liveDocs.length());
-        for (int i = 0; i < liveDocs.length(); i++) {
-          if (liveDocs.get(i)) {
-            mutableBits.set(i);
-          }
-        }
-      }
+      FixedBitSet cloneLiveDocs = cloneLiveDocs(liveDocs);
       DocIdSetIterator iterator = scorer.iterator();
       int numExtraLiveDocs = 0;
       while (iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
-        if (mutableBits.getAndSet(iterator.docID()) == false) {
+        if (cloneLiveDocs.getAndSet(iterator.docID()) == false) {
           // if we bring one back to live we need to account for it
           numExtraLiveDocs++;
         }
       }
       assert reader.numDocs() + numExtraLiveDocs <= reader.maxDoc() : "numDocs: " + reader.numDocs() + " numExtraLiveDocs: " + numExtraLiveDocs + " maxDoc: " + reader.maxDoc();
-      return wrapLiveDocs(reader, mutableBits, reader.numDocs() + numExtraLiveDocs);
+      return wrapLiveDocs(reader, cloneLiveDocs, reader.numDocs() + numExtraLiveDocs);
     } else {
       return reader;
     }
   }
 
+  /**
+   * Clones the given live docs
+   */
+  static FixedBitSet cloneLiveDocs(Bits liveDocs) {
+    if (liveDocs instanceof FixedBitSet) {
+      return ((FixedBitSet) liveDocs).clone();
+    } else { // mainly if we have asserting codec
+      FixedBitSet mutableBits = new FixedBitSet(liveDocs.length());
+      for (int i = 0; i < liveDocs.length(); i++) {
+        if (liveDocs.get(i)) {
+          mutableBits.set(i);
+        }
+      }
+      return mutableBits;
+    }
+  }
+
   private static Scorer getScorer(String softDeleteField, Query retentionQuery, CodecReader reader) throws IOException {
     BooleanQuery.Builder builder = new BooleanQuery.Builder();
     builder.add(new DocValuesFieldExistsQuery(softDeleteField), BooleanClause.Occur.FILTER);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index 23fbb04..488ccaf 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -197,7 +197,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
 
               if (oldReader.getSegmentInfo().getDelGen() == commitInfo.getDelGen()) {
                 // only DV updates
-                newReaders[i] = new SegmentReader(commitInfo, oldReader, oldReader.getLiveDocs(), oldReader.numDocs());
+                newReaders[i] = new SegmentReader(commitInfo, oldReader, oldReader.getLiveDocs(), oldReader.numDocs(), false); // this is not an NRT reader!
               } else {
                 // both DV and liveDocs have changed
                 newReaders[i] = new SegmentReader(commitInfo, oldReader);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
index b38696a..468e8e2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
@@ -43,6 +43,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.MockDirectoryWrapper.FakeIOException;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -1013,6 +1014,59 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
       DirectoryReader.openIfChanged(r);
     });
   }
+
+  public void testReuseUnchangedLeafReaderOnDVUpdate() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    indexWriterConfig.setMergePolicy(NoMergePolicy.INSTANCE);
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    doc.add(new NumericDocValuesField("some_docvalue", 2));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new StringField("id", "2", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    writer.commit();
+    DirectoryReader reader = DirectoryReader.open(dir);
+    assertEquals(2, reader.numDocs());
+    assertEquals(2, reader.maxDoc());
+    assertEquals(0, reader.numDeletedDocs());
+
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "2", Field.Store.YES));
+    writer.updateDocValues(new Term("id", "1"), new NumericDocValuesField("some_docvalue", 1));
+    writer.commit();
+    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+    assertNotSame(newReader, reader);
+    reader.close();
+    reader = newReader;
+    assertEquals(2, reader.numDocs());
+    assertEquals(2, reader.maxDoc());
+    assertEquals(0, reader.numDeletedDocs());
+
+    doc = new Document();
+    doc.add(new StringField("id", "3", Field.Store.YES));
+    doc.add(new StringField("version", "3", Field.Store.YES));
+    writer.updateDocument(new Term("id", "3"), doc);
+    writer.commit();
+
+    newReader = DirectoryReader.openIfChanged(reader);
+    assertNotSame(newReader, reader);
+    assertEquals(2, newReader.getSequentialSubReaders().size());
+    assertEquals(1, reader.getSequentialSubReaders().size());
+    assertSame(reader.getSequentialSubReaders().get(0), newReader.getSequentialSubReaders().get(0));
+    reader.close();
+    reader = newReader;
+    assertEquals(3, reader.numDocs());
+    assertEquals(3, reader.maxDoc());
+    assertEquals(0, reader.numDeletedDocs());
+    IOUtils.close(reader, writer, dir);
+  }
 }
 
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
new file mode 100644
index 0000000..dea7bc9
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSoftDeletesDirectoryReaderWrapper extends LuceneTestCase {
+
+  public void testReuseUnchangedLeafReader() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    String softDeletesField = "soft_delete";
+    indexWriterConfig.setSoftDeletesField(softDeletesField);
+    indexWriterConfig.setMergePolicy(NoMergePolicy.INSTANCE);
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new StringField("id", "2", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    writer.commit();
+    DirectoryReader reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), softDeletesField);
+    assertEquals(2, reader.numDocs());
+    assertEquals(2, reader.maxDoc());
+    assertEquals(0, reader.numDeletedDocs());
+
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "2", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+    doc = new Document();
+    doc.add(new StringField("id", "3", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    writer.commit();
+
+    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+    assertNotSame(newReader, reader);
+    reader.close();
+    reader = newReader;
+    assertEquals(3, reader.numDocs());
+    assertEquals(4, reader.maxDoc());
+    assertEquals(1, reader.numDeletedDocs());
+
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "3", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+    writer.commit();
+
+    newReader = DirectoryReader.openIfChanged(reader);
+    assertNotSame(newReader, reader);
+    assertEquals(3, newReader.getSequentialSubReaders().size());
+    assertEquals(2, reader.getSequentialSubReaders().size());
+    assertSame(reader.getSequentialSubReaders().get(0), newReader.getSequentialSubReaders().get(0));
+    assertNotSame(reader.getSequentialSubReaders().get(1), newReader.getSequentialSubReaders().get(1));
+    assertTrue(isWrapped(reader.getSequentialSubReaders().get(0)));
+    // last one has no soft deletes
+    assertFalse(isWrapped(reader.getSequentialSubReaders().get(1)));
+
+    assertTrue(isWrapped(newReader.getSequentialSubReaders().get(0)));
+    assertTrue(isWrapped(newReader.getSequentialSubReaders().get(1)));
+    // last one has no soft deletes
+    assertFalse(isWrapped(newReader.getSequentialSubReaders().get(2)));
+    reader.close();
+    reader = newReader;
+    assertEquals(3, reader.numDocs());
+    assertEquals(5, reader.maxDoc());
+    assertEquals(2, reader.numDeletedDocs());
+    IOUtils.close(reader, writer, dir);
+  }
+
+  private boolean isWrapped(LeafReader reader) {
+    return reader instanceof SoftDeletesDirectoryReaderWrapper.SoftDeletesFilterLeafReader;
+  }
+
+  public void testMixSoftAndHardDeletes() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    String softDeletesField = "soft_delete";
+    indexWriterConfig.setSoftDeletesField(softDeletesField);
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+    Set<Integer> uniqueDocs = new HashSet<>();
+    for (int i = 0; i < 100; i++) {
+      int docId = random().nextInt(5);
+      uniqueDocs.add(docId);
+      Document doc = new Document();
+      doc.add(new StringField("id",  String.valueOf(docId), Field.Store.YES));
+      if (docId %  2 == 0) {
+        writer.updateDocument(new Term("id", String.valueOf(docId)), doc);
+      } else {
+        writer.softUpdateDocument(new Term("id", String.valueOf(docId)), doc,
+            new NumericDocValuesField(softDeletesField,  0));
+      }
+    }
+
+    writer.commit();
+    writer.close();
+    DirectoryReader reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), softDeletesField);
+    assertEquals(uniqueDocs.size(), reader.numDocs());
+    IndexSearcher searcher = new IndexSearcher(reader);
+    for (Integer docId : uniqueDocs) {
+      assertEquals(1, searcher.search(new TermQuery(new Term("id", docId.toString())), 1).totalHits);
+    }
+
+    IOUtils.close(reader, dir);
+  }
+
+  public void testReaderCacheKey() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    String softDeletesField = "soft_delete";
+    indexWriterConfig.setSoftDeletesField(softDeletesField);
+    indexWriterConfig.setMergePolicy(NoMergePolicy.INSTANCE);
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new StringField("id", "2", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    writer.commit();
+    DirectoryReader reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), softDeletesField);
+    IndexReader.CacheHelper readerCacheHelper = reader.leaves().get(0).reader().getReaderCacheHelper();
+    AtomicInteger leafCalled = new AtomicInteger(0);
+    AtomicInteger dirCalled = new AtomicInteger(0);
+    readerCacheHelper.addClosedListener(key -> {
+      leafCalled.incrementAndGet();
+      assertSame(key, readerCacheHelper.getKey());
+    });
+    IndexReader.CacheHelper dirReaderCacheHelper = reader.getReaderCacheHelper();
+    dirReaderCacheHelper.addClosedListener(key -> {
+      dirCalled.incrementAndGet();
+      assertSame(key, dirReaderCacheHelper.getKey());
+    });
+    assertEquals(2, reader.numDocs());
+    assertEquals(2, reader.maxDoc());
+    assertEquals(0, reader.numDeletedDocs());
+
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "2", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+    doc = new Document();
+    doc.add(new StringField("id", "3", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    writer.addDocument(doc);
+    writer.commit();
+    assertEquals(0, leafCalled.get());
+    assertEquals(0, dirCalled.get());
+    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+    assertEquals(0, leafCalled.get());
+    assertEquals(0, dirCalled.get());
+    assertNotSame(newReader.getReaderCacheHelper().getKey(), reader.getReaderCacheHelper().getKey());
+    assertNotSame(newReader, reader);
+    reader.close();
+    reader = newReader;
+    assertEquals(1, dirCalled.get());
+    assertEquals(1, leafCalled.get());
+    IOUtils.close(reader, writer, dir);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
index e6c91b8..06aa277 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
@@ -110,7 +110,11 @@ public class TestStressNRT extends LuceneTestCase {
     final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())), useSoftDeletes);
     writer.setDoRandomForceMergeAssert(false);
     writer.commit();
-    reader = useSoftDeletes ? writer.getReader() : DirectoryReader.open(dir);
+    if (useSoftDeletes) {
+      reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), writer.w.getConfig().getSoftDeletesField());
+    } else {
+      reader = DirectoryReader.open(dir);
+    }
 
     for (int i=0; i<nWriteThreads; i++) {
       Thread thread = new Thread("WRITER"+i) {
@@ -136,7 +140,7 @@ public class TestStressNRT extends LuceneTestCase {
                   }
 
                   DirectoryReader newReader;
-                  if (rand.nextInt(100) < softCommitPercent || useSoftDeletes) {
+                  if (rand.nextInt(100) < softCommitPercent) {
                     // assertU(h.commit("softCommit","true"));
                     if (random().nextBoolean()) {
                       if (VERBOSE) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
index cc9a919..c9d7e25 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
@@ -487,7 +487,7 @@ public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase {
 
     FilterDirectoryReader reader = new MyFilterDirectoryReader(nrtReader);
     assertEquals(nrtReader, reader.getDelegate());
-    assertEquals(nrtReader, FilterDirectoryReader.unwrap(reader));
+    assertEquals(FilterDirectoryReader.unwrap(nrtReader), FilterDirectoryReader.unwrap(reader));
 
     SearcherManager mgr = new SearcherManager(reader, null);
     for(int i=0;i<10;i++) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed62b990/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
index b82df68..15ca469 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
@@ -381,7 +381,7 @@ public class RandomIndexWriter implements Closeable {
     if (r.nextInt(20) == 2) {
       doRandomForceMerge();
     }
-    if (!applyDeletions || r.nextBoolean() || w.getConfig().getSoftDeletesField() != null) {
+    if (!applyDeletions || r.nextBoolean()) {
       // if we have soft deletes we can't open from a directory
       if (LuceneTestCase.VERBOSE) {
         System.out.println("RIW.getReader: use NRT reader");
@@ -396,7 +396,12 @@ public class RandomIndexWriter implements Closeable {
       }
       w.commit();
       if (r.nextBoolean()) {
-        return DirectoryReader.open(w.getDirectory());
+        DirectoryReader reader = DirectoryReader.open(w.getDirectory());
+        if (w.getConfig().getSoftDeletesField() != null) {
+          return new SoftDeletesDirectoryReaderWrapper(reader, w.getConfig().getSoftDeletesField());
+        } else {
+          return reader;
+        }
       } else {
         return w.getReader(applyDeletions, writeAllDeletes);
       }


[38/50] lucene-solr:jira/solr-12181: LUCENE-8245: Adjust envelope to not leave as big a gap between travel planes and above/below planes.

Posted by ab...@apache.org.
LUCENE-8245: Adjust envelope to not leave as big a gap between travel planes and above/below planes.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/348de9e8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/348de9e8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/348de9e8

Branch: refs/heads/jira/solr-12181
Commit: 348de9e8b59d1ba6cf37999fe4c11c8365147fdd
Parents: aba793d
Author: Karl Wright <Da...@gmail.com>
Authored: Sun Apr 8 06:44:10 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Sun Apr 8 06:44:10 2018 -0400

----------------------------------------------------------------------
 .../org/apache/lucene/spatial3d/geom/Plane.java |  2 +-
 .../lucene/spatial3d/geom/GeoPolygonTest.java   | 32 ++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/348de9e8/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java
index fe3418c..34a2fce 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/Plane.java
@@ -24,7 +24,7 @@ package org.apache.lucene.spatial3d.geom;
  */
 public class Plane extends Vector {
   /** For plane envelopes, we need a small distance that can't lead to numerical confusion. */
-  public final static double MINIMUM_PLANE_OFFSET = MINIMUM_RESOLUTION * 1.5;
+  public final static double MINIMUM_PLANE_OFFSET = MINIMUM_RESOLUTION * 1.1;
   /** An array with no points in it */
   public final static GeoPoint[] NO_POINTS = new GeoPoint[0];
   /** An array with no bounds in it */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/348de9e8/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index 581112d..d1e6688 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -1469,5 +1469,37 @@ shape:
     final GeoPoint point3 = new GeoPoint(PlanetModel.SPHERE, 0, 1e-6);
     assertTrue(polygon.isWithin(point3) == largePolygon.isWithin(point3));
   }
+
+  @Test
+  public void testAboveBelowCrossingDifferentEdges() {
+    //POLYGON((130.846821906638 -5.066128831305991,134.5635278421427 21.75703481126756,156.31803093908155 44.5755831677161,0.0 8.860146581178396E-33,130.846821906638 -5.066128831305991))
+    final List<GeoPoint> points = new ArrayList<>();
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-5.066128831305991), Geo3DUtil.fromDegrees(130.846821906638)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(21.75703481126756), Geo3DUtil.fromDegrees(134.5635278421427)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(44.5755831677161), Geo3DUtil.fromDegrees(156.31803093908155)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(8.860146581178396E-33), Geo3DUtil.fromDegrees(0.0)));
+    final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points);
+    final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description);
+    final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description));
+    //POINT(-15.37308034708334 1.3353777223310798E-11)
+    final GeoPoint point1 = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(1.3353777223310798E-11), Geo3DUtil.fromDegrees(-15.37308034708334));
+    assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1));
+  }
+
+  @Test
+  public void testBelowCrossingTwiceEdgePoint() {
+    //POLYGON((162.9024012378976 -0.17652184258966092,162.56882659034474 -0.009075185910497524,162.52932263918404 1.6235907240799453E-189,162.17731099253956 -0.2154890860855618,162.9024012378976 -0.17652184258966092))
+    List<GeoPoint> points = new ArrayList<>();
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.17652184258966092), Geo3DUtil.fromDegrees(162.9024012378976)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.009075185910497524), Geo3DUtil.fromDegrees(162.56882659034474)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(1.6235907240799453E-189), Geo3DUtil.fromDegrees(162.52932263918404)));
+    points.add(new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-0.2154890860855618), Geo3DUtil.fromDegrees(162.17731099253956)));
+    final GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points);
+    final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, description);
+    final GeoPolygon largePolygon = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.SPHERE, Collections.singletonList(description));
+    //POINT(91.60559215160585 -6.782152464351765E-11)
+    final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, Geo3DUtil.fromDegrees(-6.782152464351765E-11), Geo3DUtil.fromDegrees(91.60559215160585));
+    assertTrue(polygon.isWithin(point) == largePolygon.isWithin(point));
+  }
   
 }


[12/50] lucene-solr:jira/solr-12181: LUCENE-8233: Use a consistent merge policy in test to make sure segment counts match

Posted by ab...@apache.org.
LUCENE-8233: Use a consistent merge policy in test to make sure segment counts match


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4a902f35
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4a902f35
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4a902f35

Branch: refs/heads/jira/solr-12181
Commit: 4a902f35db6c4758c1ae2da1b711494e73878c96
Parents: 60ae7be
Author: Simon Willnauer <si...@apache.org>
Authored: Thu Apr 5 12:23:41 2018 +0200
Committer: Simon Willnauer <si...@apache.org>
Committed: Thu Apr 5 12:23:41 2018 +0200

----------------------------------------------------------------------
 .../apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4a902f35/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
index 3f4f405..3d8ffe3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
@@ -159,7 +159,7 @@ public class TestSoftDeletesRetentionMergePolicy extends LuceneTestCase {
     IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
     indexWriterConfig.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete",
         () -> new MatchAllDocsQuery(),
-        indexWriterConfig.getMergePolicy()));
+        new LogDocMergePolicy()));
     indexWriterConfig.setSoftDeletesField("soft_delete");
     IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
 


[26/50] lucene-solr:jira/solr-12181: SOLR-12183: Refactor Streaming Expression test cases

Posted by ab...@apache.org.
SOLR-12183: Refactor Streaming Expression test cases


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/80375acb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/80375acb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/80375acb

Branch: refs/heads/jira/solr-12181
Commit: 80375acb7f696df7fb3cf0424d5e82777e3f5c87
Parents: d420139
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Apr 5 11:21:49 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Apr 5 14:00:07 2018 -0400

----------------------------------------------------------------------
 .../solrj/io/stream/MathExpressionTest.java     |  4145 +++++++
 .../solrj/io/stream/StreamDecoratorTest.java    |  3954 +++++++
 .../solrj/io/stream/StreamExpressionTest.java   | 10213 ++---------------
 3 files changed, 9217 insertions(+), 9095 deletions(-)
----------------------------------------------------------------------



[21/50] lucene-solr:jira/solr-12181: SOLR-12175: Fix TestConfigSetsAPI

Posted by ab...@apache.org.
SOLR-12175: Fix TestConfigSetsAPI


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d2845b03
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d2845b03
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d2845b03

Branch: refs/heads/jira/solr-12181
Commit: d2845b033e3d2b7c09c013742a60bc5826c5f5f2
Parents: c58516e
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Apr 5 12:39:33 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Apr 5 14:00:07 2018 -0400

----------------------------------------------------------------------
 .../src/test-files/solr/configsets/_default/conf/managed-schema   | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2845b03/solr/core/src/test-files/solr/configsets/_default/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/_default/conf/managed-schema b/solr/core/src/test-files/solr/configsets/_default/conf/managed-schema
index 4168a83..6f4e2ef 100644
--- a/solr/core/src/test-files/solr/configsets/_default/conf/managed-schema
+++ b/solr/core/src/test-files/solr/configsets/_default/conf/managed-schema
@@ -139,6 +139,7 @@
     <dynamicField name="*_fs" type="pfloats"  indexed="true"  stored="true"/>
     <dynamicField name="*_d"  type="pdouble" indexed="true"  stored="true"/>
     <dynamicField name="*_ds" type="pdoubles" indexed="true"  stored="true"/>
+    <dynamicField name="random_*" type="random"/>
 
     <!-- Type used for data-driven schema, to add a string copy for each text field -->
     <dynamicField name="*_str" type="strings" stored="false" docValues="true" indexed="false" />
@@ -211,6 +212,8 @@
     <fieldType name="pfloats" class="solr.FloatPointField" docValues="true" multiValued="true"/>
     <fieldType name="plongs" class="solr.LongPointField" docValues="true" multiValued="true"/>
     <fieldType name="pdoubles" class="solr.DoublePointField" docValues="true" multiValued="true"/>
+    <fieldType name="random" class="solr.RandomSortField" indexed="true"/>
+
 
     <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
          is a more restricted form of the canonical representation of dateTime


[39/50] lucene-solr:jira/solr-12181: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr

Posted by ab...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/bd8fe724
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/bd8fe724
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/bd8fe724

Branch: refs/heads/jira/solr-12181
Commit: bd8fe72426b2a9df45050143e85481f523854239
Parents: 348de9e 2ae488a
Author: Karl Wright <Da...@gmail.com>
Authored: Sun Apr 8 06:44:45 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Sun Apr 8 06:44:45 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  5 +++
 .../miscellaneous/WordDelimiterFilter.java      | 18 +++++---
 .../miscellaneous/WordDelimiterGraphFilter.java | 17 +++++---
 .../apache/lucene/index/TestIndexSorting.java   |  7 +--
 solr/CHANGES.txt                                |  3 ++
 .../solr/handler/TestReplicationHandler.java    |  8 +++-
 .../solr/uninverting/TestDocTermOrds.java       | 12 ++++--
 solr/solr-ref-guide/build.xml                   |  1 +
 solr/solr-ref-guide/src/_config.yml.template    |  1 +
 solr/solr-ref-guide/src/css/customstyles.css    |  2 +-
 solr/solr-ref-guide/src/learning-to-rank.adoc   |  2 +-
 solr/solr-ref-guide/src/meta-docs/pdf.adoc      |  2 +-
 .../src/rule-based-replica-placement.adoc       |  2 +-
 .../src/updating-parts-of-documents.adoc        | 45 +++++++++++++++++---
 .../src/upgrading-a-solr-cluster.adoc           |  2 +-
 ...store-data-with-the-data-import-handler.adoc |  2 +-
 16 files changed, 93 insertions(+), 36 deletions(-)
----------------------------------------------------------------------



[41/50] lucene-solr:jira/solr-12181: SOLR-11971: Add CVE number: CVE-2018-1308

Posted by ab...@apache.org.
SOLR-11971: Add CVE number: CVE-2018-1308


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3530397f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3530397f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3530397f

Branch: refs/heads/jira/solr-12181
Commit: 3530397f1777332872eac2760f9aa0e2ae1d7450
Parents: 9936458
Author: Uwe Schindler <us...@apache.org>
Authored: Sun Apr 8 19:20:12 2018 +0200
Committer: Uwe Schindler <us...@apache.org>
Committed: Sun Apr 8 19:20:12 2018 +0200

----------------------------------------------------------------------
 solr/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3530397f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c7270da..f910224 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -386,7 +386,7 @@ Bug Fixes
 
 * SOLR-11988: Fix exists() method in EphemeralDirectoryFactory/MockDirectoryFactory to prevent false positives (hossman)
 
-* SOLR-11971: Don't allow referal to external resources in DataImportHandler's dataConfig request parameter.
+* SOLR-11971: Don't allow referral to external resources in DataImportHandler's dataConfig request parameter (CVE-2018-1308).
   (麦 香浓郁, Uwe Schindler)
 
 * SOLR-12021: Fixed a bug in ApiSpec and other JSON resource loading that was causing unclosed file handles (hossman)


[32/50] lucene-solr:jira/solr-12181: Ref Guide: add language to source blocks; split optimistic concurrency example & add explanations

Posted by ab...@apache.org.
Ref Guide: add language to source blocks; split optimistic concurrency example & add explanations


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/abaf378d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/abaf378d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/abaf378d

Branch: refs/heads/jira/solr-12181
Commit: abaf378d0e9e2e4af705d964edc2aaf74103cb95
Parents: b2d756c
Author: Cassandra Targett <ct...@apache.org>
Authored: Fri Apr 6 10:59:42 2018 -0500
Committer: Cassandra Targett <ct...@apache.org>
Committed: Fri Apr 6 11:00:09 2018 -0500

----------------------------------------------------------------------
 .../src/updating-parts-of-documents.adoc        | 45 +++++++++++++++++---
 .../src/upgrading-a-solr-cluster.adoc           |  2 +-
 2 files changed, 40 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/abaf378d/solr/solr-ref-guide/src/updating-parts-of-documents.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/updating-parts-of-documents.adoc b/solr/solr-ref-guide/src/updating-parts-of-documents.adoc
index 5e25d51..949b60b 100644
--- a/solr/solr-ref-guide/src/updating-parts-of-documents.adoc
+++ b/solr/solr-ref-guide/src/updating-parts-of-documents.adoc
@@ -58,7 +58,7 @@ The core functionality of atomically updating a document requires that all field
 
 If `<copyField/>` destinations are configured as stored, then Solr will attempt to index both the current value of the field as well as an additional copy from any source fields. If such fields contain some information that comes from the indexing program and some information that comes from copyField, then the information which originally came from the indexing program will be lost when an atomic update is made.
 
-There are other kinds of derived fields that must also be set so they aren't stored. Some spatial field types use derived fields. Examples of this are solr.BBoxField and solr.LatLonType. CurrencyFieldType also uses derived fields.  These types create additional fields which are normally specified by a dynamic field definition. That dynamic field definition must be not stored, or indexing will fail.
+There are other kinds of derived fields that must also be set so they aren't stored. Some spatial field types, such as BBoxField and LatLonType, use derived fields. CurrencyFieldType also uses derived fields. These types create additional fields which are normally specified by a dynamic field definition. That dynamic field definition must be not stored, or indexing will fail.
 
 === Example Updating Part of a Document
 
@@ -188,30 +188,61 @@ When the client resubmits a changed document to Solr, the `\_version_` can be in
 
 If the document being updated does not include the `\_version_` field, and atomic updates are not being used, the document will be treated by normal Solr rules, which is usually to discard the previous version.
 
-When using Optimistic Concurrency, clients can include an optional `versions=true` request parameter to indicate that the _new_ versions of the documents being added should be included in the response. This allows clients to immediately know what the `\_version_` is of every documented added without needing to make a redundant <<realtime-get.adoc#realtime-get,`/get` request>>.
+When using Optimistic Concurrency, clients can include an optional `versions=true` request parameter to indicate that the _new_ versions of the documents being added should be included in the response. This allows clients to immediately know what the `\_version_` is of every document added without needing to make a redundant <<realtime-get.adoc#realtime-get,`/get` request>>.
 
-For example:
+Following are some examples using `versions=true` in queries:
 
-[source]
+[source,bash]
 ----
 $ curl -X POST -H 'Content-Type: application/json' 'http://localhost:8983/solr/techproducts/update?versions=true' --data-binary '
 [ { "id" : "aaa" },
   { "id" : "bbb" } ]'
+----
+[source,json]
+----
 {"responseHeader":{"status":0,"QTime":6},
  "adds":["aaa",1498562471222312960,
          "bbb",1498562471225458688]}
+----
+
+In this example, we have added 2 documents "aaa" and "bbb". Because we added `versions=true` to the request, the response shows the document version for each document.
+
+[source,bash]
+----
 $ curl -X POST -H 'Content-Type: application/json' 'http://localhost:8983/solr/techproducts/update?_version_=999999&versions=true' --data-binary '
 [{ "id" : "aaa",
    "foo_s" : "update attempt with wrong existing version" }]'
+----
+[source,json]
+----
 {"responseHeader":{"status":409,"QTime":3},
  "error":{"msg":"version conflict for aaa expected=999999 actual=1498562471222312960",
           "code":409}}
+----
+
+
+In this example, we've attempted to update document "aaa" but specified the wrong version in the request: `_version_=999999` doesn't match the document version we just got when we added the document. We get an error in response.
+
+[source,bash]
+----
 $ curl -X POST -H 'Content-Type: application/json' 'http://localhost:8983/solr/techproducts/update?_version_=1498562471222312960&versions=true&commit=true' --data-binary '
 [{ "id" : "aaa",
    "foo_s" : "update attempt with correct existing version" }]'
+----
+[source,json]
+----
 {"responseHeader":{"status":0,"QTime":5},
  "adds":["aaa",1498562624496861184]}
+----
+
+Now we've sent an update with a value for `\_version_` that matches the value in the index, and it succeeds. Because we included `versions=true` to the update request, the response includes a different value for the `\_version_` field.
+
+[source,bash]
+----
 $ curl 'http://localhost:8983/solr/techproducts/query?q=*:*&fl=id,_version_'
+----
+[source,json]
+----
 {
   "responseHeader":{
     "status":0,
@@ -229,11 +260,13 @@ $ curl 'http://localhost:8983/solr/techproducts/query?q=*:*&fl=id,_version_'
   }}
 ----
 
-For more information, please also see https://www.youtube.com/watch?v=WYVM6Wz-XTw[Yonik Seeley's presentation on NoSQL features in Solr 4] from Apache Lucene EuroCon 2012.
+Finally, we can issue a query that requests the `\_version_` field be included in the response, and we can see that for the two documents in our example index.
+
+For more information, please also see Yonik Seeley's presentation on https://www.youtube.com/watch?v=WYVM6Wz-XTw[NoSQL features in Solr 4] from Apache Lucene EuroCon 2012.
 
 == Document Centric Versioning Constraints
 
-Optimistic Concurrency is extremely powerful, and works very efficiently because it uses an internally assigned, globally unique values for the `\_version_` field. However, In some situations users may want to configure their own document specific version field, where the version values are assigned on a per-document basis by an external system, and have Solr reject updates that attempt to replace a document with an "older" version. In situations like this the {solr-javadocs}/solr-core/org/apache/solr/update/processor/DocBasedVersionConstraintsProcessorFactory.html[`DocBasedVersionConstraintsProcessorFactory`] can be useful.
+Optimistic Concurrency is extremely powerful, and works very efficiently because it uses internally assigned, globally unique values for the `\_version_` field. However, in some situations users may want to configure their own document specific version field, where the version values are assigned on a per-document basis by an external system, and have Solr reject updates that attempt to replace a document with an "older" version. In situations like this the {solr-javadocs}/solr-core/org/apache/solr/update/processor/DocBasedVersionConstraintsProcessorFactory.html[`DocBasedVersionConstraintsProcessorFactory`] can be useful.
 
 The basic usage of `DocBasedVersionConstraintsProcessorFactory` is to configure it in `solrconfig.xml` as part of the <<update-request-processors.adoc#update-request-processor-configuration,UpdateRequestProcessorChain>> and specify the name of your custom `versionField` in your schema that should be checked when validating updates:
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/abaf378d/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc b/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
index 01855f1..6abe138 100644
--- a/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
+++ b/solr/solr-ref-guide/src/upgrading-a-solr-cluster.adoc
@@ -65,7 +65,7 @@ If you have a `/var/solr/solr.in.sh` file for your existing Solr install, runnin
 
 Open `/etc/default/solr.in.sh` with a text editor and verify that the following variables are set correctly, or add them bottom of the include file as needed:
 
-[source]
+[source,properties]
 ZK_HOST=
 SOLR_HOST=
 SOLR_PORT=


[04/50] lucene-solr:jira/solr-12181: LUCENE-8233: Add support for soft deletes to IndexWriter

Posted by ab...@apache.org.
LUCENE-8233: Add support for soft deletes to IndexWriter 

This change adds support for soft deletes as a fully supported feature
of the index writer. Soft deletes are accounted for inside the index
writer and therefore also by merge policies.

This change also adds a SoftDeletesRetentionMergePolicy that allows
users to selectively carry over soft-deleted documents across merges
for retention policies. The merge policy selects documents that should
be kept in the merged segment based on a user-provided query.
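
A hedged sketch of how these pieces fit together (dir, doc, and the
"soft_delete" field name are illustrative assumptions, not part of the
API added here):

IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
config.setSoftDeletesField("soft_delete");
// retain every soft-deleted document in merged segments; a real retention
// query would normally match only the documents that are still needed
config.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete",
    MatchAllDocsQuery::new, config.getMergePolicy()));
IndexWriter writer = new IndexWriter(dir, config);
// soft-deletes the previous version of the document instead of hard-deleting it
writer.softUpdateDocument(new Term("id", "1"), doc,
    new NumericDocValuesField("soft_delete", 1));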

Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ecc17f90
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ecc17f90
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ecc17f90

Branch: refs/heads/jira/solr-12181
Commit: ecc17f9023309ca2c46eaf65fd031e4af0ef5a25
Parents: cf56890
Author: Simon Willnauer <si...@apache.org>
Authored: Wed Apr 4 13:44:17 2018 +0200
Committer: Simon Willnauer <si...@apache.org>
Committed: Wed Apr 4 13:45:14 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   6 +
 .../lucene/index/BufferedUpdatesStream.java     |  12 +-
 .../lucene/index/FrozenBufferedUpdates.java     |   2 +-
 .../org/apache/lucene/index/IndexWriter.java    |  51 +--
 .../apache/lucene/index/IndexWriterConfig.java  |  29 ++
 .../lucene/index/LiveIndexWriterConfig.java     |  12 +
 .../org/apache/lucene/index/MergePolicy.java    |   8 +
 .../apache/lucene/index/MergePolicyWrapper.java |   4 +
 .../org/apache/lucene/index/NoMergePolicy.java  |   7 +-
 .../org/apache/lucene/index/PendingDeletes.java |  97 ++++--
 .../apache/lucene/index/PendingSoftDeletes.java | 157 ++++++++++
 .../org/apache/lucene/index/ReaderUtil.java     |   2 -
 .../apache/lucene/index/ReadersAndUpdates.java  |  19 +-
 .../index/SoftDeletesRetentionMergePolicy.java  | 163 ++++++++++
 .../lucene/index/StandardDirectoryReader.java   |   2 +-
 .../search/DocValuesFieldExistsQuery.java       |  49 +--
 .../src/java/org/apache/lucene/util/Bits.java   |  10 +-
 .../apache/lucene/index/TestIndexWriter.java    | 189 ++++-------
 .../lucene/index/TestIndexWriterConfig.java     |   1 +
 .../lucene/index/TestIndexWriterOnDiskFull.java |  11 +-
 .../index/TestIndexingSequenceNumbers.java      |   6 +-
 .../apache/lucene/index/TestMultiFields.java    |  11 +-
 .../apache/lucene/index/TestPendingDeletes.java |  10 +-
 .../lucene/index/TestPendingSoftDeletes.java    | 232 ++++++++++++++
 .../TestSoftDeletesRetentionMergePolicy.java    | 312 +++++++++++++++++++
 .../org/apache/lucene/index/TestStressNRT.java  |   7 +-
 .../idversion/TestIDVersionPostingsFormat.java  |  28 +-
 .../asserting/AssertingLiveDocsFormat.java      |   2 +-
 .../apache/lucene/index/RandomIndexWriter.java  |  79 ++---
 29 files changed, 1225 insertions(+), 293 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 95d8738..84e242d 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -115,6 +115,12 @@ New Features
   searches based on minimum-interval semantics. (Alan Woodward, Adrien Grand,
   Jim Ferenczi, Simon Willnauer)
 
+* LUCENE-8233: Add support for soft deletes to IndexWriter delete accounting.
+  Soft deletes are accounted for inside the index writer and therefore also
+  by merge policies. A SoftDeletesRetentionMergePolicy is added that allows
+  users to selectively carry over soft-deleted documents across merges for
+  retention policies. (Simon Willnauer, Mike McCandless, Robert Muir)
+
 Bug Fixes
 
 * LUCENE-8234: Fixed bug in how spatial relationship is computed for

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
index 63001d4..78fe950 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
@@ -27,7 +27,6 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.BytesRef;
@@ -63,7 +62,6 @@ class BufferedUpdatesStream implements Accountable {
   private final AtomicLong bytesUsed = new AtomicLong();
   private final AtomicInteger numTerms = new AtomicInteger();
   private final IndexWriter writer;
-  private boolean closed;
 
   public BufferedUpdatesStream(IndexWriter writer) {
     this.writer = writer;
@@ -122,12 +120,6 @@ class BufferedUpdatesStream implements Accountable {
     return bytesUsed.get();
   }
 
-  private synchronized void ensureOpen() {
-    if (closed) {
-      throw new AlreadyClosedException("already closed");
-    }
-  }
-
   public static class ApplyDeletesResult {
     
     // True if any actual deletes took place:
@@ -300,8 +292,6 @@ class BufferedUpdatesStream implements Accountable {
   /** Opens SegmentReader and inits SegmentState for each segment. */
   public SegmentState[] openSegmentStates(IndexWriter.ReaderPool pool, List<SegmentCommitInfo> infos,
                                           Set<SegmentCommitInfo> alreadySeenSegments, long delGen) throws IOException {
-    ensureOpen();
-
     List<SegmentState> segStates = new ArrayList<>();
     try {
       for (SegmentCommitInfo info : infos) {
@@ -334,7 +324,7 @@ class BufferedUpdatesStream implements Accountable {
         totDelCount += segState.rld.getPendingDeleteCount() - segState.startDelCount;
         int fullDelCount = segState.rld.info.getDelCount() + segState.rld.getPendingDeleteCount();
         assert fullDelCount <= segState.rld.info.info.maxDoc() : fullDelCount + " > " + segState.rld.info.info.maxDoc();
-        if (segState.rld.isFullyDeleted()) {
+        if (segState.rld.isFullyDeleted() && writer.getConfig().mergePolicy.keepFullyDeletedSegment(segState.reader) == false) {
           if (allDeleted == null) {
             allDeleted = new ArrayList<>();
           }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
index 1636319..f7d16c4 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
@@ -412,7 +412,7 @@ class FrozenBufferedUpdates {
         writer.checkpoint();
       }
 
-      if (writer.keepFullyDeletedSegments == false && result.allDeleted != null) {
+      if (result.allDeleted != null) {
         if (infoStream.isEnabled("IW")) {
           infoStream.message("IW", "drop 100% deleted segments: " + writer.segString(result.allDeleted));
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 2e14166..4305176 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -842,7 +842,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
         if (create == false) {
           return null;
         }
-        rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, null, new PendingDeletes(null, info));
+        rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info));
         // Steal initial reference:
         readerMap.put(info, rld);
       } else {
@@ -884,6 +884,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     if (rld != null) {
       delCount += rld.getPendingDeleteCount();
     }
+    assert delCount <= info.info.maxDoc(): "delCount: " + delCount + " maxDoc: " + info.info.maxDoc();
     return delCount;
   }
 
@@ -1151,7 +1152,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
           LeafReaderContext leaf = leaves.get(i);
           SegmentReader segReader = (SegmentReader) leaf.reader();
           SegmentReader newReader = new SegmentReader(segmentInfos.info(i), segReader, segReader.getLiveDocs(), segReader.numDocs());
-          readerPool.readerMap.put(newReader.getSegmentInfo(), new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), newReader, new PendingDeletes(newReader, newReader.getSegmentInfo())));
+          readerPool.readerMap.put(newReader.getSegmentInfo(), new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), newReader, newPendingDeletes(newReader, newReader.getSegmentInfo())));
         }
 
         // We always assume we are carrying over incoming changes when opening from reader:
@@ -1641,7 +1642,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       if (rld != null) {
         synchronized(bufferedUpdatesStream) {
           if (rld.delete(docID)) {
-            if (rld.isFullyDeleted()) {
+            if (isFullyDeleted(rld)) {
               dropDeletedSegment(rld.info);
               checkpoint();
             }
@@ -4003,21 +4004,21 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 
     final boolean allDeleted = merge.segments.size() == 0 ||
       merge.info.info.maxDoc() == 0 ||
-      (mergedUpdates != null && mergedUpdates.isFullyDeleted());
+      (mergedUpdates != null && isFullyDeleted(mergedUpdates));
 
     if (infoStream.isEnabled("IW")) {
       if (allDeleted) {
-        infoStream.message("IW", "merged segment " + merge.info + " is 100% deleted" +  (keepFullyDeletedSegments ? "" : "; skipping insert"));
+        infoStream.message("IW", "merged segment " + merge.info + " is 100% deleted; skipping insert");
       }
     }
 
-    final boolean dropSegment = allDeleted && !keepFullyDeletedSegments;
+    final boolean dropSegment = allDeleted;
 
     // If we merged no segments then we better be dropping
     // the new segment:
     assert merge.segments.size() > 0 || dropSegment;
 
-    assert merge.info.info.maxDoc() != 0 || keepFullyDeletedSegments || dropSegment;
+    assert merge.info.info.maxDoc() != 0 || dropSegment;
 
     if (mergedUpdates != null) {
       boolean success = false;
@@ -4716,19 +4717,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     }
   }
 
-  boolean keepFullyDeletedSegments;
-
-  /** Only for testing.
-   *
-   * @lucene.internal */
-  void setKeepFullyDeletedSegments(boolean v) {
-    keepFullyDeletedSegments = v;
-  }
-
-  boolean getKeepFullyDeletedSegments() {
-    return keepFullyDeletedSegments;
-  }
-
   // called only from assert
   private boolean filesExist(SegmentInfos toSync) throws IOException {
     
@@ -5207,4 +5195,27 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     assert count >= 0 : "pendingNumDocs is negative: " + count;
     return count;
   }
+
+  private PendingDeletes newPendingDeletes(SegmentCommitInfo info) {
+    String softDeletesField = config.getSoftDeletesField();
+    return softDeletesField == null ? new PendingDeletes(info) : new PendingSoftDeletes(softDeletesField, info);
+  }
+
+  private PendingDeletes newPendingDeletes(SegmentReader reader, SegmentCommitInfo info) {
+    String softDeletesField = config.getSoftDeletesField();
+    return softDeletesField == null ? new PendingDeletes(reader, info) : new PendingSoftDeletes(softDeletesField, reader, info);
+  }
+
+  final boolean isFullyDeleted(ReadersAndUpdates readersAndUpdates) throws IOException {
+    if (readersAndUpdates.isFullyDeleted()) {
+      SegmentReader reader = readersAndUpdates.getReader(IOContext.READ);
+      try {
+        return config.mergePolicy.keepFullyDeletedSegment(reader) == false;
+      } finally {
+        readersAndUpdates.release(reader);
+      }
+    }
+    return false;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
index 997a686..d657d52 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
@@ -25,6 +25,7 @@ import java.util.stream.Collectors;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
@@ -484,5 +485,33 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig {
   public IndexWriterConfig setCheckPendingFlushUpdate(boolean checkPendingFlushOnUpdate) {
     return (IndexWriterConfig) super.setCheckPendingFlushUpdate(checkPendingFlushOnUpdate);
   }
+
+  /**
+   * Sets the soft deletes field. A soft deletes field in Lucene is a doc-values field that marks a document as
+   * soft-deleted if the document has at least one value in that field. If a document is marked as soft-deleted, the
+   * document is treated as if it had been hard-deleted through the IndexWriter API
+   * ({@link IndexWriter#deleteDocuments(Term...)}). Merges will reclaim soft-deleted as well as hard-deleted
+   * documents, and index readers obtained from the IndexWriter will reflect all deleted documents in their live docs.
+   * If soft-deletes are used, documents must be indexed via
+   * {@link IndexWriter#softUpdateDocument(Term, Iterable, Field...)}. Deletes are applied via
+   * {@link IndexWriter#updateDocValues(Term, Field...)}.
+   *
+   * Soft deletes allow documents to be retained across merges if the merge policy modifies the live docs of a merge
+   * reader. {@link SoftDeletesRetentionMergePolicy}, for instance, allows specifying an arbitrary query to mark all
+   * documents that should survive the merge. This can be used, for example, to keep all document modifications for a
+   * certain time interval, or the last N operations if some kind of sequence ID is available in the index.
+   *
+   * Currently there is no API support to un-delete a soft-deleted document. In order to un-delete it, the document
+   * must be re-indexed using {@link IndexWriter#softUpdateDocument(Term, Iterable, Field...)}.
+   *
+   * The default value is <code>null</code>, which disables soft-deletes. If soft-deletes are enabled, documents can
+   * still be hard-deleted. Hard-deleted documents won't be considered soft-deleted even if they have a value in the
+   * soft-deletes field.
+   *
+   * @see #getSoftDeletesField()
+   */
+  public IndexWriterConfig setSoftDeletesField(String softDeletesField) {
+    this.softDeletesField = softDeletesField;
+    return this;
+  }
   
 }

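A short sketch of the two delete paths the javadoc above describes; the "id" and "soft_delete" field names are assumptions, not API requirements:

    // soft delete: a doc-values update puts a value into the soft-deletes
    // field, which the writer now accounts for as a delete
    void softDelete(IndexWriter writer, String id) throws IOException {
      writer.updateDocValues(new Term("id", id),
          new NumericDocValuesField("soft_delete", 1));
    }

    // hard delete: unchanged, and still reflected in the live docs
    void hardDelete(IndexWriter writer, String id) throws IOException {
      writer.deleteDocuments(new Term("id", id));
    }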
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
index af8ff15..016e880 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
@@ -106,6 +106,9 @@ public class LiveIndexWriterConfig {
   /** if an indexing thread should check for pending flushes on update in order to help out on a full flush*/
   protected volatile boolean checkPendingFlushOnUpdate = true;
 
+  /** soft deletes field */
+  protected String softDeletesField = null;
+
   // used by IndexWriterConfig
   LiveIndexWriterConfig(Analyzer analyzer) {
     this.analyzer = analyzer;
@@ -452,6 +455,14 @@ public class LiveIndexWriterConfig {
     return this;
   }
 
+  /**
+   * Returns the soft deletes field or <code>null</code> if soft-deletes are disabled.
+   * See {@link IndexWriterConfig#setSoftDeletesField(String)} for details.
+   */
+  public String getSoftDeletesField() {
+    return softDeletesField;
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
@@ -475,6 +486,7 @@ public class LiveIndexWriterConfig {
     sb.append("commitOnClose=").append(getCommitOnClose()).append("\n");
     sb.append("indexSort=").append(getIndexSort()).append("\n");
     sb.append("checkPendingFlushOnUpdate=").append(isCheckPendingFlushOnUpdate()).append("\n");
+    sb.append("softDeletesField=").append(getSoftDeletesField()).append("\n");
     return sb.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
index d9a0ab8..c0d9748 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
@@ -604,4 +604,12 @@ public abstract class MergePolicy {
     v *= 1024 * 1024;
     this.maxCFSSegmentSize = v > Long.MAX_VALUE ? Long.MAX_VALUE : (long) v;
   }
+
+  /**
+   * Returns true if the segment represented by the given CodecReader should be kept even if it is fully deleted.
+   * This is useful for testing, or if the merge policy implements retention policies for soft deletes, for instance.
+   */
+  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+    return false;
+  }
 }

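The hook can be overridden through a wrapper, which is also how the TestIndexWriter change below uses it; a minimal sketch that pins fully deleted segments in place:

    MergePolicy policy = new MergePolicyWrapper(new TieredMergePolicy()) {
      @Override
      public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
        return true; // never drop 100% deleted segments
      }
    };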
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/MergePolicyWrapper.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergePolicyWrapper.java b/lucene/core/src/java/org/apache/lucene/index/MergePolicyWrapper.java
index c51cd00..606f3c2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergePolicyWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergePolicyWrapper.java
@@ -86,4 +86,8 @@ public class MergePolicyWrapper extends MergePolicy {
     return getClass().getSimpleName() + "(" + in + ")";
   }
 
+  @Override
+  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+    return in.keepFullyDeletedSegment(reader);
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
index ec309b8..4387f25 100644
--- a/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
@@ -67,7 +67,12 @@ public final class NoMergePolicy extends MergePolicy {
   public void setNoCFSRatio(double noCFSRatio) {
     super.setNoCFSRatio(noCFSRatio);
   }
-  
+
+  @Override
+  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+    return super.keepFullyDeletedSegment(reader);
+  }
+
   @Override
   public String toString() {
     return "NoMergePolicy";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java b/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
index 74043f3..bce704c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
@@ -18,6 +18,7 @@
 package org.apache.lucene.index;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.LiveDocsFormat;
@@ -31,57 +32,71 @@ import org.apache.lucene.util.MutableBits;
 /**
  * This class handles accounting and applying pending deletes for live segment readers
  */
-final class PendingDeletes {
-  private final SegmentCommitInfo info;
+class PendingDeletes {
+  protected final SegmentCommitInfo info;
   // True if the current liveDocs is referenced by an
   // external NRT reader:
-  private boolean liveDocsShared;
+  protected boolean liveDocsShared;
   // Holds the current shared (readable and writable)
   // liveDocs.  This is null when there are no deleted
   // docs, and it's copy-on-write (cloned whenever we need
   // to change it but it's been shared to an external NRT
   // reader).
   private Bits liveDocs;
-  private int pendingDeleteCount;
+  protected int pendingDeleteCount;
+  private boolean liveDocsInitialized;
 
   PendingDeletes(SegmentReader reader, SegmentCommitInfo info) {
+    this(info, reader.getLiveDocs(), true);
+    pendingDeleteCount = reader.numDeletedDocs() - info.getDelCount();
+  }
+
+  PendingDeletes(SegmentCommitInfo info) {
+    this(info, null, false);
+  }
+
+  private PendingDeletes(SegmentCommitInfo info, Bits liveDocs, boolean liveDocsInitialized) {
     this.info = info;
     liveDocsShared = true;
-    liveDocs = reader != null ? reader.getLiveDocs() : null;
-    if (reader != null) {
-      pendingDeleteCount = reader.numDeletedDocs() - info.getDelCount();
-    } else {
-      pendingDeleteCount = 0;
-    }
+    this.liveDocs = liveDocs;
+    pendingDeleteCount = 0;
+    this.liveDocsInitialized = liveDocsInitialized;
   }
 
 
-  /**
-   * Marks a document as deleted in this segment and return true if a document got actually deleted or
-   * if the document was already deleted.
-   */
-  boolean delete(int docID) throws IOException {
-    assert info.info.maxDoc() > 0;
+  protected MutableBits getMutableBits() throws IOException {
     if (liveDocsShared) {
       // Copy on write: this means we've cloned a
       // SegmentReader sharing the current liveDocs
       // instance; must now make a private clone so we can
       // change it:
       LiveDocsFormat liveDocsFormat = info.info.getCodec().liveDocsFormat();
+      MutableBits mutableBits;
       if (liveDocs == null) {
-        liveDocs = liveDocsFormat.newLiveDocs(info.info.maxDoc());
+        mutableBits = liveDocsFormat.newLiveDocs(info.info.maxDoc());
       } else {
-        liveDocs = liveDocsFormat.newLiveDocs(liveDocs);
+        mutableBits = liveDocsFormat.newLiveDocs(liveDocs);
       }
+      liveDocs = mutableBits;
       liveDocsShared = false;
     }
+    return (MutableBits) liveDocs;
+  }
 
-    assert liveDocs != null;
-    assert docID >= 0 && docID < liveDocs.length() : "out of bounds: docid=" + docID + " liveDocsLength=" + liveDocs.length() + " seg=" + info.info.name + " maxDoc=" + info.info.maxDoc();
+
+  /**
+   * Marks a document as deleted in this segment; returns true if the document was newly
+   * deleted, false if it was already deleted.
+   */
+  boolean delete(int docID) throws IOException {
+    assert info.info.maxDoc() > 0;
+    MutableBits mutableBits = getMutableBits();
+    assert mutableBits != null;
+    assert docID >= 0 && docID < mutableBits.length() : "out of bounds: docid=" + docID + " liveDocsLength=" + mutableBits.length() + " seg=" + info.info.name + " maxDoc=" + info.info.maxDoc();
     assert !liveDocsShared;
-    final boolean didDelete = liveDocs.get(docID);
+    final boolean didDelete = mutableBits.get(docID);
     if (didDelete) {
-      ((MutableBits) liveDocs).clear(docID);
+      mutableBits.clear(docID);
       pendingDeleteCount++;
     }
     return didDelete;
@@ -114,10 +129,32 @@ final class PendingDeletes {
   /**
    * Called once a new reader is opened for this segment ie. when deletes or updates are applied.
    */
-  void onNewReader(SegmentReader reader, SegmentCommitInfo info) {
-    if (liveDocs == null) {
-      liveDocs = reader.getLiveDocs();
+  void onNewReader(SegmentReader reader, SegmentCommitInfo info) throws IOException {
+    if (liveDocsInitialized == false) {
+      if (reader.hasDeletions()) {
+        // we only initialize this once, either in the ctor or here.
+        // if we use the live docs from a reader, it has to be in a situation where we
+        // don't have any existing live docs
+        assert pendingDeleteCount == 0 : "pendingDeleteCount: " + pendingDeleteCount;
+        liveDocs = reader.getLiveDocs();
+        assert liveDocs == null || assertCheckLiveDocs(liveDocs, info.info.maxDoc(), info.getDelCount());
+        liveDocsShared = true;
+
+      }
+      liveDocsInitialized = true;
+    }
+  }
+
+  private boolean assertCheckLiveDocs(Bits bits, int expectedLength, int expectedDeleteCount) {
+    assert bits.length() == expectedLength;
+    int deletedCount = 0;
+    for (int i = 0; i < bits.length(); i++) {
+      if (bits.get(i) == false) {
+        deletedCount++;
+      }
     }
+    assert deletedCount == expectedDeleteCount : "deleted: " + deletedCount + " != expected: " + expectedDeleteCount;
+    return true;
   }
 
   /**
@@ -188,6 +225,14 @@ final class PendingDeletes {
    * Returns <code>true</code> iff the segment represented by this {@link PendingDeletes} is fully deleted
    */
   boolean isFullyDeleted() {
-    return info.getDelCount() + pendingDeleteCount == info.info.maxDoc();
+    return info.getDelCount() + numPendingDeletes() == info.info.maxDoc();
+  }
+
+  /**
+   * Called before the given DocValuesFieldUpdates are applied
+   * @param info the field to apply
+   * @param fieldUpdates the field updates
+   */
+  void onDocValuesUpdate(FieldInfo info, List<DocValuesFieldUpdates> fieldUpdates) throws IOException {
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java b/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
new file mode 100644
index 0000000..1f6c2ef
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DocValuesFieldExistsQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.MutableBits;
+
+final class PendingSoftDeletes extends PendingDeletes {
+
+  private final String field;
+  private long dvGeneration = -2;
+  private final PendingDeletes hardDeletes;
+
+  PendingSoftDeletes(String field, SegmentCommitInfo info)  {
+    super(info);
+    this.field = field;
+    hardDeletes = new PendingDeletes(info);
+  }
+
+  PendingSoftDeletes(String field, SegmentReader reader, SegmentCommitInfo info) {
+    super(reader, info);
+    this.field = field;
+    hardDeletes = new PendingDeletes(reader, info);
+  }
+
+  @Override
+  boolean delete(int docID) throws IOException {
+    MutableBits mutableBits = getMutableBits(); // we need to fetch this first; it might be a shared instance with hardDeletes
+    if (hardDeletes.delete(docID)) {
+      if (mutableBits.get(docID)) { // delete it here too!
+        mutableBits.clear(docID);
+        assert hardDeletes.delete(docID) == false;
+      } else {
+        // if it was deleted subtract the delCount
+        pendingDeleteCount--;
+      }
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  int numPendingDeletes() {
+    return super.numPendingDeletes() + hardDeletes.numPendingDeletes();
+  }
+
+  @Override
+  void onNewReader(SegmentReader reader, SegmentCommitInfo info) throws IOException {
+    super.onNewReader(reader, info);
+    hardDeletes.onNewReader(reader, info);
+    if (dvGeneration != info.getDocValuesGen()) { // only re-calculate this if we haven't seen this generation
+      final DocIdSetIterator iterator = DocValuesFieldExistsQuery.getDocValuesDocIdSetIterator(field, reader);
+      if (iterator == null) { // nothing is deleted; we don't have a soft deletes field in this segment
+        this.pendingDeleteCount = 0;
+      } else {
+        assert info.info.maxDoc() > 0 : "maxDoc is 0";
+        applyUpdates(iterator);
+      }
+      dvGeneration = info.getDocValuesGen();
+    }
+    assert numPendingDeletes() + info.getDelCount() <= info.info.maxDoc() :
+        numPendingDeletes() + " + " + info.getDelCount() + " > " + info.info.maxDoc();
+  }
+
+  @Override
+  boolean writeLiveDocs(Directory dir) throws IOException {
+    // delegate the write to the hard deletes - it will only write if somebody used it.
+    return hardDeletes.writeLiveDocs(dir);
+  }
+
+  @Override
+  void reset() {
+    dvGeneration = -2;
+    super.reset();
+    hardDeletes.reset();
+  }
+
+  private void applyUpdates(DocIdSetIterator iterator) throws IOException {
+    final MutableBits mutableBits = getMutableBits();
+    int newDeletes = 0;
+    int docID;
+    while ((docID = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      if (mutableBits.get(docID)) { // doc is live - clear it
+        mutableBits.clear(docID);
+        newDeletes++;
+        // now that we know we deleted it, and we fully control the hard deletes, we can
+        // do correct accounting below.
+      }
+    }
+    pendingDeleteCount += newDeletes;
+  }
+
+  @Override
+  void onDocValuesUpdate(FieldInfo info, List<DocValuesFieldUpdates> updatesToApply) throws IOException {
+    if (field.equals(info.name)) {
+      assert dvGeneration < info.getDocValuesGen() : "we have seen this generation update already: " + dvGeneration + " vs. " + info.getDocValuesGen();
+      DocValuesFieldUpdates.Iterator[] subs = new DocValuesFieldUpdates.Iterator[updatesToApply.size()];
+      for(int i=0; i<subs.length; i++) {
+        subs[i] = updatesToApply.get(i).iterator();
+      }
+      DocValuesFieldUpdates.Iterator iterator = DocValuesFieldUpdates.mergedIterator(subs);
+      applyUpdates(new DocIdSetIterator() {
+        int docID = -1;
+        @Override
+        public int docID() {
+          return docID;
+        }
+
+        @Override
+        public int nextDoc() {
+          return docID = iterator.nextDoc();
+        }
+
+        @Override
+        public int advance(int target) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public long cost() {
+          throw new UnsupportedOperationException();
+        }
+      });
+      dvGeneration = info.getDocValuesGen();
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("PendingSoftDeletes(seg=").append(info);
+    sb.append(" numPendingDeletes=").append(pendingDeleteCount);
+    sb.append(" field=").append(field);
+    sb.append(" dvGeneration=").append(dvGeneration);
+    sb.append(" hardDeletes=").append(hardDeletes);
+    return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/ReaderUtil.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReaderUtil.java b/lucene/core/src/java/org/apache/lucene/index/ReaderUtil.java
index 32c7b32..bb26c1c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReaderUtil.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReaderUtil.java
@@ -16,10 +16,8 @@
  */
 package org.apache.lucene.index;
 
-
 import java.util.List;
 
-
 /**
  * Common util methods for dealing with {@link IndexReader}s and {@link IndexReaderContext}s.
  *

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
index 8a0e17e..3e06aca 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
@@ -87,21 +87,22 @@ final class ReadersAndUpdates {
 
   final AtomicLong ramBytesUsed = new AtomicLong();
 
-  ReadersAndUpdates(int indexCreatedVersionMajor, SegmentCommitInfo info, SegmentReader reader,
+  ReadersAndUpdates(int indexCreatedVersionMajor, SegmentCommitInfo info,
                     PendingDeletes pendingDeletes) {
     this.info = info;
     this.pendingDeletes = pendingDeletes;
     this.indexCreatedVersionMajor = indexCreatedVersionMajor;
-    this.reader = reader;
   }
 
   /** Init from a previously opened SegmentReader.
    *
    * <p>NOTE: steals incoming ref from reader. */
-  ReadersAndUpdates(int indexCreatedVersionMajor, SegmentReader reader, PendingDeletes pendingDeletes) {
-    this(indexCreatedVersionMajor, reader.getSegmentInfo(), reader, pendingDeletes);
+  ReadersAndUpdates(int indexCreatedVersionMajor, SegmentReader reader, PendingDeletes pendingDeletes) throws IOException {
+    this(indexCreatedVersionMajor, reader.getSegmentInfo(), pendingDeletes);
     assert pendingDeletes.numPendingDeletes() >= 0
         : "got " + pendingDeletes.numPendingDeletes() + " reader.numDeletedDocs()=" + reader.numDeletedDocs() + " info.getDelCount()=" + info.getDelCount() + " maxDoc=" + reader.maxDoc() + " numDocs=" + reader.numDocs();
+    this.reader = reader;
+    pendingDeletes.onNewReader(reader, info);
   }
 
   public void incRef() {
@@ -238,7 +239,8 @@ final class ReadersAndUpdates {
     Bits liveDocs = pendingDeletes.getLiveDocs();
     pendingDeletes.liveDocsShared();
     if (liveDocs != null) {
-      return new SegmentReader(reader.getSegmentInfo(), reader, liveDocs, info.info.maxDoc() - info.getDelCount() - pendingDeletes.numPendingDeletes());
+      return new SegmentReader(reader.getSegmentInfo(), reader, liveDocs,
+          info.info.maxDoc() - info.getDelCount() - pendingDeletes.numPendingDeletes());
     } else {
       // liveDocs == null and reader != null. That can only be if there are no deletes
       assert reader.getLiveDocs() == null;
@@ -317,6 +319,7 @@ final class ReadersAndUpdates {
       final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
       final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, null, updatesContext, segmentSuffix);
       try (final DocValuesConsumer fieldsConsumer = dvFormat.fieldsConsumer(state)) {
+        pendingDeletes.onDocValuesUpdate(fieldInfo, updatesToApply);
         // write the numeric updates to a new gen'd docvalues file
         fieldsConsumer.addNumericField(fieldInfo, new EmptyDocValuesProducer() {
             @Override
@@ -452,15 +455,13 @@ final class ReadersAndUpdates {
       final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, null, updatesContext, segmentSuffix);
       try (final DocValuesConsumer fieldsConsumer = dvFormat.fieldsConsumer(state)) {
         // write the binary updates to a new gen'd docvalues file
-
+        pendingDeletes.onDocValuesUpdate(fieldInfo, updatesToApply);
         fieldsConsumer.addBinaryField(fieldInfo, new EmptyDocValuesProducer() {
             @Override
             public BinaryDocValues getBinary(FieldInfo fieldInfoIn) throws IOException {
               if (fieldInfoIn != fieldInfo) {
                 throw new IllegalArgumentException("wrong fieldInfo");
               }
-              final int maxDoc = reader.maxDoc();
-
               DocValuesFieldUpdates.Iterator[] subs = new DocValuesFieldUpdates.Iterator[updatesToApply.size()];
               for(int i=0;i<subs.length;i++) {
                 subs[i] = updatesToApply.get(i).iterator();
@@ -678,9 +679,9 @@ final class ReadersAndUpdates {
       SegmentReader newReader = new SegmentReader(info, reader, pendingDeletes.getLiveDocs(), info.info.maxDoc() - info.getDelCount() - pendingDeletes.numPendingDeletes());
       boolean success2 = false;
       try {
+        pendingDeletes.onNewReader(newReader, info);
         reader.decRef();
         reader = newReader;
-        pendingDeletes.onNewReader(reader, info);
         success2 = true;
       } finally {
         if (success2 == false) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
new file mode 100644
index 0000000..debe7d7
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DocValuesFieldExistsQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.FixedBitSet;
+
+/**
+ * This {@link MergePolicy} allows soft-deleted documents to be carried over across merges. The policy wraps
+ * the merge reader and marks documents as "live" if they have a value in the soft deletes field and match the
+ * provided query. This allows, for instance, keeping documents alive based on time or any other constraint in the
+ * index. The main purpose of this merge policy is to implement retention policies for document modifications that
+ * would otherwise vanish from the index. Using this merge policy allows controlling when soft deletes are reclaimed
+ * by merges.
+ * @lucene.experimental
+ */
+public final class SoftDeletesRetentionMergePolicy extends OneMergeWrappingMergePolicy {
+  private final String field;
+  private final Supplier<Query> retentionQuerySupplier;
+  /**
+   * Creates a new {@link SoftDeletesRetentionMergePolicy}
+   * @param field the soft deletes field
+   * @param retentionQuerySupplier a query supplier for the retention query
+   * @param in the wrapped MergePolicy
+   */
+  public SoftDeletesRetentionMergePolicy(String field, Supplier<Query> retentionQuerySupplier, MergePolicy in) {
+    super(in, toWrap -> new MergePolicy.OneMerge(toWrap.segments) {
+      @Override
+      public CodecReader wrapForMerge(CodecReader reader) throws IOException {
+        CodecReader wrapped = toWrap.wrapForMerge(reader);
+        Bits liveDocs = reader.getLiveDocs();
+        if (liveDocs == null) { // no deletes - just keep going
+          return wrapped;
+        }
+        return applyRetentionQuery(field, retentionQuerySupplier.get(), wrapped);
+      }
+    });
+    Objects.requireNonNull(field, "field must not be null");
+    Objects.requireNonNull(retentionQuerySupplier, "retentionQuerySupplier must not be null");
+    this.field = field;
+    this.retentionQuerySupplier = retentionQuerySupplier;
+  }
+
+  @Override
+  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+    Scorer scorer = getScorer(field, retentionQuerySupplier.get(), wrapLiveDocs(reader, null, reader.maxDoc()));
+    if (scorer != null) {
+      DocIdSetIterator iterator = scorer.iterator();
+      boolean atLeastOneHit = iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS;
+      return atLeastOneHit;
+    }
+    return super.keepFullyDeletedSegment(reader);
+  }
+
+  // pkg private for testing
+  static CodecReader applyRetentionQuery(String softDeleteField, Query retentionQuery, CodecReader reader) throws IOException {
+    Bits liveDocs = reader.getLiveDocs();
+    if (liveDocs == null) { // no deletes - just keep going
+      return reader;
+    }
+    CodecReader wrappedReader = wrapLiveDocs(reader, new Bits() { // only search deleted
+      @Override
+      public boolean get(int index) {
+        return liveDocs.get(index) == false;
+      }
+
+      @Override
+      public int length() {
+        return liveDocs.length();
+      }
+    }, reader.maxDoc() - reader.numDocs());
+    Scorer scorer = getScorer(softDeleteField, retentionQuery, wrappedReader);
+    if (scorer != null) {
+      FixedBitSet mutableBits;
+      if (liveDocs instanceof FixedBitSet) {
+        mutableBits = ((FixedBitSet) liveDocs).clone();
+      } else { // mainly if we have the asserting codec
+        mutableBits = new FixedBitSet(liveDocs.length());
+        for (int i = 0; i < liveDocs.length(); i++) {
+          if (liveDocs.get(i)) {
+            mutableBits.set(i);
+          }
+        }
+      }
+      DocIdSetIterator iterator = scorer.iterator();
+      int numExtraLiveDocs = 0;
+      while (iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+        if (mutableBits.getAndSet(iterator.docID()) == false) {
+          // if we bring one back to live we need to account for it
+          numExtraLiveDocs++;
+        }
+      }
+      assert reader.numDocs() + numExtraLiveDocs <= reader.maxDoc() : "numDocs: " + reader.numDocs() + " numExtraLiveDocs: " + numExtraLiveDocs + " maxDoc: " + reader.maxDoc();
+      return wrapLiveDocs(reader, mutableBits, reader.numDocs() + numExtraLiveDocs);
+    } else {
+      return reader;
+    }
+  }
+
+  private static Scorer getScorer(String softDeleteField, Query retentionQuery, CodecReader reader) throws IOException {
+    BooleanQuery.Builder builder = new BooleanQuery.Builder();
+    builder.add(new DocValuesFieldExistsQuery(softDeleteField), BooleanClause.Occur.FILTER);
+    builder.add(retentionQuery, BooleanClause.Occur.FILTER);
+    IndexSearcher s = new IndexSearcher(reader);
+    s.setQueryCache(null);
+    Weight weight = s.createWeight(builder.build(), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
+    return weight.scorer(reader.getContext());
+  }
+
+  /**
+   * Returns a codec reader with the given live docs
+   */
+  private static CodecReader wrapLiveDocs(CodecReader reader, Bits liveDocs, int numDocs) {
+    return new FilterCodecReader(reader) {
+      @Override
+      public CacheHelper getCoreCacheHelper() {
+        return reader.getCoreCacheHelper();
+      }
+
+      @Override
+      public CacheHelper getReaderCacheHelper() {
+        return null; // we are altering live docs
+      }
+
+      @Override
+      public Bits getLiveDocs() {
+        return liveDocs;
+      }
+
+      @Override
+      public int numDocs() {
+        return numDocs;
+      }
+    };
+  }
+}

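As a sketch of the sequence-ID retention mentioned in the IndexWriterConfig javadoc: documents at or above a tracked checkpoint survive merges even when soft-deleted. The "soft_delete" and "seq_no" field names and the checkpoint variable are assumptions for illustration:

    AtomicLong globalCheckpoint = new AtomicLong();
    MergePolicy mp = new SoftDeletesRetentionMergePolicy("soft_delete",
        () -> LongPoint.newRangeQuery("seq_no", globalCheckpoint.get(), Long.MAX_VALUE),
        new TieredMergePolicy());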
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index f95ca82..23fbb04 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -103,7 +103,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
         final ReadersAndUpdates rld = writer.readerPool.get(info, true);
         try {
           final SegmentReader reader = rld.getReadOnlyClone(IOContext.READ);
-          if (reader.numDocs() > 0 || writer.getKeepFullyDeletedSegments()) {
+          if (reader.numDocs() > 0 || writer.getConfig().mergePolicy.keepFullyDeletedSegment(reader)) {
             // Steal the ref:
             readers.add(reader);
             infosUpto++;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java
index 009f11c..54c8512 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java
@@ -21,9 +21,7 @@ import java.io.IOException;
 import java.util.Objects;
 
 import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 
@@ -62,21 +60,37 @@ public final class DocValuesFieldExistsQuery extends Query {
   }
 
   @Override
-  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
     return new ConstantScoreWeight(this, boost) {
       @Override
       public Scorer scorer(LeafReaderContext context) throws IOException {
-        FieldInfos fieldInfos = context.reader().getFieldInfos();
-        FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
-        if (fieldInfo == null) {
+        DocIdSetIterator iterator = getDocValuesDocIdSetIterator(field, context.reader());
+        if (iterator == null) {
           return null;
         }
-        DocValuesType dvType = fieldInfo.getDocValuesType();
-        LeafReader reader = context.reader();
-        DocIdSetIterator iterator;
-        switch(dvType) {
+        return new ConstantScoreScorer(this, score(), iterator);
+      }
+
+      @Override
+      public boolean isCacheable(LeafReaderContext ctx) {
+        return DocValues.isCacheable(ctx, field);
+      }
+
+    };
+  }
+
+  /**
+   * Returns a {@link DocIdSetIterator} from the given field or null if the field doesn't exist
+   * in the reader or if the reader has no doc values for the field.
+   */
+  public static DocIdSetIterator getDocValuesDocIdSetIterator(String field, LeafReader reader) throws IOException {
+    FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
+    final DocIdSetIterator iterator;
+    if (fieldInfo != null) {
+      switch (fieldInfo.getDocValuesType()) {
         case NONE:
-          return null;
+          iterator = null;
+          break;
         case NUMERIC:
           iterator = reader.getNumericDocValues(field);
           break;
@@ -94,16 +108,9 @@ public final class DocValuesFieldExistsQuery extends Query {
           break;
         default:
           throw new AssertionError();
-        }
-
-        return new ConstantScoreScorer(this, score(), iterator);
       }
-
-      @Override
-      public boolean isCacheable(LeafReaderContext ctx) {
-        return DocValues.isCacheable(ctx, field);
-      }
-
-    };
+      return iterator;
+    }
+    return null;
   }
 }

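The extracted helper is also usable outside of query scoring; a small sketch that counts the documents in a leaf with any value in a given doc-values field:

    static int countDocsWithField(LeafReader leaf, String field) throws IOException {
      DocIdSetIterator it =
          DocValuesFieldExistsQuery.getDocValuesDocIdSetIterator(field, leaf);
      if (it == null) {
        return 0; // field missing, or it has no doc values in this segment
      }
      int count = 0;
      while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        count++;
      }
      return count;
    }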
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/java/org/apache/lucene/util/Bits.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/Bits.java b/lucene/core/src/java/org/apache/lucene/util/Bits.java
index 29935e7..1f9a7aa 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Bits.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Bits.java
@@ -30,17 +30,17 @@ public interface  Bits {
    *        by this interface, <b>just don't do it!</b>
    * @return <code>true</code> if the bit is set, <code>false</code> otherwise.
    */
-  public boolean get(int index);
+  boolean get(int index);
   
   /** Returns the number of bits in this set */
-  public int length();
+  int length();
 
-  public static final Bits[] EMPTY_ARRAY = new Bits[0];
+  Bits[] EMPTY_ARRAY = new Bits[0];
   
   /**
    * Bits impl of the specified length with all bits set. 
    */
-  public static class MatchAllBits implements Bits {
+  class MatchAllBits implements Bits {
     final int len;
     
     public MatchAllBits(int len) {
@@ -61,7 +61,7 @@ public interface  Bits {
   /**
    * Bits impl of the specified length with no bits set. 
    */
-  public static class MatchNoBits implements Bits {
+  class MatchNoBits implements Bits {
     final int len;
     
     public MatchNoBits(int len) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index a95a8e3..e45716d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -22,7 +22,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.io.StringReader;
-import java.io.UncheckedIOException;
 import java.net.URI;
 import java.nio.file.FileSystem;
 import java.nio.file.Files;
@@ -88,7 +87,6 @@ import org.apache.lucene.store.NoLockFactory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.SimpleFSDirectory;
 import org.apache.lucene.store.SimpleFSLockFactory;
-import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Constants;
@@ -2223,14 +2221,21 @@ public class TestIndexWriter extends LuceneTestCase {
   public void testMergeAllDeleted() throws IOException {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+    AtomicBoolean keepFullyDeletedSegments = new AtomicBoolean();
+    iwc.setMergePolicy(new MergePolicyWrapper(iwc.getMergePolicy()) {
+      @Override
+      public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+        return keepFullyDeletedSegments.get();
+      }
+    });
     final SetOnce<IndexWriter> iwRef = new SetOnce<>();
     IndexWriter evilWriter = RandomIndexWriter.mockIndexWriter(random(), dir, iwc, new RandomIndexWriter.TestPoint() {
       @Override
       public void apply(String message) {
         if ("startCommitMerge".equals(message)) {
-          iwRef.get().setKeepFullyDeletedSegments(false);
+          keepFullyDeletedSegments.set(false);
         } else if ("startMergeInit".equals(message)) {
-          iwRef.get().setKeepFullyDeletedSegments(true);
+          keepFullyDeletedSegments.set(true);
         }
       }
     });
@@ -2958,94 +2963,10 @@ public class TestIndexWriter extends LuceneTestCase {
       }
     }
   }
-  private static Bits getSoftDeletesLiveDocs(LeafReader reader, String field) {
-    try {
-      NumericDocValues softDelete = reader.getNumericDocValues(field);
-      if (softDelete != null) {
-        BitSet bitSet = BitSet.of(softDelete, reader.maxDoc());
-        Bits inLiveDocs = reader.getLiveDocs() == null ? new Bits.MatchAllBits(reader.maxDoc()) : reader.getLiveDocs();
-        Bits newliveDocs = new Bits() {
-          @Override
-          public boolean get(int index) {
-            return inLiveDocs.get(index) && bitSet.get(index) == false;
-          }
-
-          @Override
-          public int length() {
-            return inLiveDocs.length();
-          }
-        };
-        return newliveDocs;
-
-      } else {
-        return reader.getLiveDocs();
-      }
-    } catch (IOException e) {
-      throw new UncheckedIOException(e);
-    }
-  }
-
-  private static DirectoryReader wrapSoftDeletes(DirectoryReader reader, String field) throws IOException {
-    return new FilterDirectoryReader(reader, new FilterDirectoryReader.SubReaderWrapper() {
-      @Override
-      public LeafReader wrap(LeafReader reader) {
-        Bits softDeletesLiveDocs = getSoftDeletesLiveDocs(reader, field);
-        int numDocs = getNumDocs(reader, softDeletesLiveDocs);
-        return new FilterLeafReader(reader) {
-
-          @Override
-          public Bits getLiveDocs() {
-            return softDeletesLiveDocs;
-          }
-
-          @Override
-          public CacheHelper getReaderCacheHelper() {
-            return in.getReaderCacheHelper();
-          }
-
-          @Override
-          public CacheHelper getCoreCacheHelper() {
-            return in.getCoreCacheHelper();
-          }
-
-          @Override
-          public int numDocs() {
-            return numDocs;
-          }
-        };
-      }
-    }) {
-      @Override
-      protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
-        return wrapSoftDeletes(in, field);
-      }
-
-      @Override
-      public CacheHelper getReaderCacheHelper() {
-        return in.getReaderCacheHelper();
-      }
-    };
-  }
-
-  private static int getNumDocs(LeafReader reader, Bits softDeletesLiveDocs) {
-    int numDocs;
-    if (softDeletesLiveDocs == reader.getLiveDocs()) {
-      numDocs = reader.numDocs();
-    } else {
-      int tmp = 0;
-      for (int i = 0; i < softDeletesLiveDocs.length(); i++) {
-        if (softDeletesLiveDocs.get(i) ) {
-           tmp++;
-        }
-      }
-      numDocs = tmp;
-    }
-    return numDocs;
-  }
 
   public void testSoftUpdateDocuments() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig().setSoftDeletesField("soft_delete"));
     expectThrows(IllegalArgumentException.class, () -> {
       writer.softUpdateDocument(null, new Document(), new NumericDocValuesField("soft_delete", 1));
     });
@@ -3071,7 +2992,7 @@ public class TestIndexWriter extends LuceneTestCase {
     doc.add(new StringField("version", "2", Field.Store.YES));
     Field field = new NumericDocValuesField("soft_delete", 1);
     writer.softUpdateDocument(new Term("id", "1"), doc, field);
-    DirectoryReader reader = wrapSoftDeletes(DirectoryReader.open(writer), "soft_delete");
+    DirectoryReader reader = DirectoryReader.open(writer);
     assertEquals(2, reader.docFreq(new Term("id", "1")));
     IndexSearcher searcher = new IndexSearcher(reader);
     TopDocs topDocs = searcher.search(new TermQuery(new Term("id", "1")), 10);
@@ -3112,43 +3033,53 @@ public class TestIndexWriter extends LuceneTestCase {
   }
 
   public void testSoftUpdatesConcurrently() throws IOException, InterruptedException {
+    softUpdatesConcurrently(false);
+  }
+
+  public void testSoftUpdatesConcurrentlyMixedDeletes() throws IOException, InterruptedException {
+    softUpdatesConcurrently(true);
+  }
+
+  public void softUpdatesConcurrently(boolean mixDeletes) throws IOException, InterruptedException {
     Directory dir = newDirectory();
     IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    indexWriterConfig.setSoftDeletesField("soft_delete");
     AtomicBoolean mergeAwaySoftDeletes = new AtomicBoolean(random().nextBoolean());
-    indexWriterConfig.setMergePolicy(new OneMergeWrappingMergePolicy(indexWriterConfig.getMergePolicy(), towrap ->
-      new MergePolicy.OneMerge(towrap.segments) {
-        @Override
-        public CodecReader wrapForMerge(CodecReader reader) throws IOException {
-          if (mergeAwaySoftDeletes.get() == false) {
-            return towrap.wrapForMerge(reader);
-          }
-          Bits softDeletesLiveDocs = getSoftDeletesLiveDocs(reader, "soft_delete");
-          int numDocs = getNumDocs(reader, softDeletesLiveDocs);
-          CodecReader wrapped = towrap.wrapForMerge(reader);
-          return new FilterCodecReader(wrapped) {
+    if (mixDeletes == false) {
+      indexWriterConfig.setMergePolicy(new OneMergeWrappingMergePolicy(indexWriterConfig.getMergePolicy(), towrap ->
+          new MergePolicy.OneMerge(towrap.segments) {
             @Override
-            public CacheHelper getCoreCacheHelper() {
-              return in.getCoreCacheHelper();
-            }
+            public CodecReader wrapForMerge(CodecReader reader) throws IOException {
+              if (mergeAwaySoftDeletes.get()) {
+                return towrap.wrapForMerge(reader);
+              } else {
+                CodecReader wrapped = towrap.wrapForMerge(reader);
+                return new FilterCodecReader(wrapped) {
+                  @Override
+                  public CacheHelper getCoreCacheHelper() {
+                    return in.getCoreCacheHelper();
+                  }
 
-            @Override
-            public CacheHelper getReaderCacheHelper() {
-              return in.getReaderCacheHelper();
-            }
+                  @Override
+                  public CacheHelper getReaderCacheHelper() {
+                    return in.getReaderCacheHelper();
+                  }
 
-            @Override
-            public Bits getLiveDocs() {
-              return softDeletesLiveDocs;
-            }
+                  @Override
+                  public Bits getLiveDocs() {
+                    return null; // everything is live
+                  }
 
-            @Override
-            public int numDocs() {
-              return numDocs;
+                  @Override
+                  public int numDocs() {
+                    return maxDoc();
+                  }
+                };
+              }
             }
-          };
-        }
-      }
-    ));
+          }
+      ));
+    }
     IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
     Thread[] threads = new Thread[2 + random().nextInt(3)];
     CountDownLatch startLatch = new CountDownLatch(1);
@@ -3165,13 +3096,21 @@ public class TestIndexWriter extends LuceneTestCase {
             if (updateSeveralDocs) {
               Document doc = new Document();
               doc.add(new StringField("id", id, Field.Store.YES));
-              writer.softUpdateDocuments(new Term("id", id), Arrays.asList(doc, doc),
-                  new NumericDocValuesField("soft_delete", 1));
+              if (mixDeletes && random().nextBoolean()) {
+                writer.updateDocuments(new Term("id", id), Arrays.asList(doc, doc));
+              } else {
+                writer.softUpdateDocuments(new Term("id", id), Arrays.asList(doc, doc),
+                    new NumericDocValuesField("soft_delete", 1));
+              }
             } else {
               Document doc = new Document();
               doc.add(new StringField("id", id, Field.Store.YES));
-              writer.softUpdateDocument(new Term("id", id), doc,
-                  new NumericDocValuesField("soft_delete", 1));
+              if (mixDeletes && random().nextBoolean()) {
+                writer.updateDocument(new Term("id", id), doc);
+              } else {
+                writer.softUpdateDocument(new Term("id", id), doc,
+                    new NumericDocValuesField("soft_delete", 1));
+              }
             }
             ids.add(id);
           }
@@ -3187,7 +3126,7 @@ public class TestIndexWriter extends LuceneTestCase {
     for (int i = 0; i < threads.length; i++) {
       threads[i].join();
     }
-    DirectoryReader reader = wrapSoftDeletes(DirectoryReader.open(writer), "soft_delete");
+    DirectoryReader reader = DirectoryReader.open(writer);
     IndexSearcher searcher = new IndexSearcher(reader);
     for (String id : ids) {
       TopDocs topDocs = searcher.search(new TermQuery(new Term("id", id)), 10);
@@ -3217,8 +3156,6 @@ public class TestIndexWriter extends LuceneTestCase {
         assertEquals(1, reader.docFreq(new Term("id", id)));
       }
     }
-
     IOUtils.close(reader, writer, dir);
   }
-
 }
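
For context, here is a minimal sketch of the soft-deletes workflow these tests exercise, using only the entry points visible in the diff above (`setSoftDeletesField`, `softUpdateDocument`); the class name, field name, and ids are arbitrary choices for illustration:

[source,java]
----
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class SoftDeletesSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig().setSoftDeletesField("soft_delete");
    try (IndexWriter writer = new IndexWriter(dir, config)) {
      Document doc = new Document();
      doc.add(new StringField("id", "1", Field.Store.YES));
      writer.addDocument(doc);

      // Replace the document: the old copy is only marked deleted via a
      // doc-values update on the configured soft-deletes field.
      Document update = new Document();
      update.add(new StringField("id", "1", Field.Store.YES));
      writer.softUpdateDocument(new Term("id", "1"), update,
          new NumericDocValuesField("soft_delete", 1));

      try (DirectoryReader reader = DirectoryReader.open(writer)) {
        // Both copies remain visible to a plain reader, which is exactly
        // what the docFreq assertion in testSoftUpdateDocuments checks.
        System.out.println(reader.docFreq(new Term("id", "1"))); // 2
      }
    }
  }
}
----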

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
index 063045e..7238869 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
@@ -100,6 +100,7 @@ public class TestIndexWriterConfig extends LuceneTestCase {
     getters.add("getInfoStream");
     getters.add("getUseCompoundFile");
     getters.add("isCheckPendingFlushOnUpdate");
+    getters.add("getSoftDeletesField");
     
     for (Method m : IndexWriterConfig.class.getDeclaredMethods()) {
       if (m.getDeclaringClass() == IndexWriterConfig.class && m.getName().startsWith("get")) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
index be862ef..d9e73a1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -501,11 +501,14 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
         newIndexWriterConfig(new MockAnalyzer(random()))
           .setMergeScheduler(new SerialMergeScheduler())
           .setReaderPooling(true)
-          .setMergePolicy(newLogMergePolicy(2))
+          .setMergePolicy(new MergePolicyWrapper(newLogMergePolicy(2)) {
+            @Override
+            public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+              // we can do this because we add/delete/add (and don't merge to "nothing")
+              return true;
+            }
+          })
     );
-    // we can do this because we add/delete/add (and dont merge to "nothing")
-    w.setKeepFullyDeletedSegments(true);
-
     Document doc = new Document();
 
     doc.add(newTextField("f", "doctor who", Field.Store.NO));
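
In application code, the same override replaces the removed `IndexWriter.setKeepFullyDeletedSegments` call roughly as follows (a sketch; the wrapped `TieredMergePolicy` is only a placeholder choice):

[source,java]
----
import java.io.IOException;

import org.apache.lucene.index.CodecReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MergePolicyWrapper;
import org.apache.lucene.index.TieredMergePolicy;

class KeepFullyDeletedSegmentsConfig {
  static IndexWriterConfig newConfig() {
    return new IndexWriterConfig()
        .setMergePolicy(new MergePolicyWrapper(new TieredMergePolicy()) {
          @Override
          public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
            // Never drop a segment merely because all of its documents are deleted.
            return true;
          }
        });
  }
}
----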

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestIndexingSequenceNumbers.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexingSequenceNumbers.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexingSequenceNumbers.java
index 52f806a..44ea74d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexingSequenceNumbers.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexingSequenceNumbers.java
@@ -97,9 +97,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
                   if (random().nextBoolean()) {
                     seqNos[threadID] = w.updateDocument(id, doc);
                   } else {
-                    List<Document> docs = new ArrayList<>();
-                    docs.add(doc);
-                    seqNos[threadID] = w.updateDocuments(id, docs);
+                    seqNos[threadID] = w.updateDocuments(id, Arrays.asList(doc));
                   }
                 }
               } catch (Exception e) {
@@ -128,7 +126,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
       DirectoryReader r = w.getReader();
       IndexSearcher s = newSearcher(r);
       TopDocs hits = s.search(new TermQuery(id), 1);
-      assertEquals(1, hits.totalHits);
+      assertEquals("maxDoc: " + r.maxDoc(), 1, hits.totalHits);
       Document doc = r.document(hits.scoreDocs[0].doc);
       assertEquals(maxThread, doc.getField("thread").numericValue().intValue());
       r.close();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
index 27f2f1a..6e0d643 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
@@ -49,10 +49,13 @@ public class TestMultiFields extends LuceneTestCase {
       Directory dir = newDirectory();
 
       IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                             .setMergePolicy(NoMergePolicy.INSTANCE));
-      // we can do this because we use NoMergePolicy (and dont merge to "nothing")
-      w.setKeepFullyDeletedSegments(true);
-
+                                             .setMergePolicy(new MergePolicyWrapper(NoMergePolicy.INSTANCE) {
+                                               @Override
+                                               public boolean keepFullyDeletedSegment(CodecReader reader) {
+                                               // we can do this because we use NoMergePolicy (and don't merge to "nothing")
+                                                 return true;
+                                               }
+                                             }));
       Map<BytesRef,List<Integer>> docs = new HashMap<>();
       Set<Integer> deleted = new HashSet<>();
       List<BytesRef> terms = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
index 39f5680..e150e06 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
@@ -32,12 +32,16 @@ import org.apache.lucene.util.Version;
 
 public class TestPendingDeletes extends LuceneTestCase {
 
+  protected PendingDeletes newPendingDeletes(SegmentCommitInfo commitInfo) {
+    return new PendingDeletes(commitInfo);
+  }
+
   public void testDeleteDoc() throws IOException {
     RAMDirectory dir = new RAMDirectory();
     SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 10, false, Codec.getDefault(),
         Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
     SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, 0, 0, 0);
-    PendingDeletes deletes = new PendingDeletes(null, commitInfo);
+    PendingDeletes deletes = newPendingDeletes(commitInfo);
     assertNull(deletes.getLiveDocs());
     int docToDelete = TestUtil.nextInt(random(), 0, 7);
     assertTrue(deletes.delete(docToDelete));
@@ -73,7 +77,7 @@ public class TestPendingDeletes extends LuceneTestCase {
     SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 6, false, Codec.getDefault(),
         Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
     SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, 0, 0, 0);
-    PendingDeletes deletes = new PendingDeletes(null, commitInfo);
+    PendingDeletes deletes = newPendingDeletes(commitInfo);
     assertFalse(deletes.writeLiveDocs(dir));
     assertEquals(0, dir.listAll().length);
     boolean secondDocDeletes = random().nextBoolean();
@@ -130,7 +134,7 @@ public class TestPendingDeletes extends LuceneTestCase {
     SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 3, false, Codec.getDefault(),
         Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
     SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, 0, 0, 0);
-    PendingDeletes deletes = new PendingDeletes(null, commitInfo);
+    PendingDeletes deletes = newPendingDeletes(commitInfo);
     for (int i = 0; i < 3; i++) {
       assertTrue(deletes.delete(i));
       if (random().nextBoolean()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java
new file mode 100644
index 0000000..c428a4b
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util.Version;
+
+public class TestPendingSoftDeletes extends TestPendingDeletes {
+
+  @Override
+  protected PendingSoftDeletes newPendingDeletes(SegmentCommitInfo commitInfo) {
+    return new PendingSoftDeletes("_soft_deletes", commitInfo);
+  }
+
+  public void testDeleteSoft() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); // no soft-deletes field configured here
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "1"), doc,
+        new NumericDocValuesField("_soft_deletes", 1));
+    doc = new Document();
+    doc.add(new StringField("id", "2", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "2"), doc,
+        new NumericDocValuesField("_soft_deletes", 1));
+    doc = new Document();
+    doc.add(new StringField("id", "2", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "2"), doc,
+        new NumericDocValuesField("_soft_deletes", 1));
+    writer.commit();
+    DirectoryReader reader = writer.getReader();
+    assertEquals(1, reader.leaves().size());
+    SegmentReader segmentReader = (SegmentReader) reader.leaves().get(0).reader();
+    SegmentCommitInfo segmentInfo = segmentReader.getSegmentInfo();
+    PendingSoftDeletes pendingSoftDeletes = newPendingDeletes(segmentInfo);
+    pendingSoftDeletes.onNewReader(segmentReader, segmentInfo);
+    assertEquals(1, pendingSoftDeletes.numPendingDeletes());
+    assertTrue(pendingSoftDeletes.getLiveDocs().get(0));
+    assertFalse(pendingSoftDeletes.getLiveDocs().get(1));
+    assertTrue(pendingSoftDeletes.getLiveDocs().get(2));
+    // pass reader again
+    Bits liveDocs = pendingSoftDeletes.getLiveDocs();
+    pendingSoftDeletes.liveDocsShared();
+    pendingSoftDeletes.onNewReader(segmentReader, segmentInfo);
+    assertEquals(1, pendingSoftDeletes.numPendingDeletes());
+    assertSame(liveDocs, pendingSoftDeletes.getLiveDocs());
+
+    // now apply a hard delete
+    writer.deleteDocuments(new Term("id", "1"));
+    writer.commit();
+    IOUtils.close(reader);
+    reader = DirectoryReader.open(dir);
+    assertEquals(1, reader.leaves().size());
+    segmentReader = (SegmentReader) reader.leaves().get(0).reader();
+    segmentInfo = segmentReader.getSegmentInfo();
+    pendingSoftDeletes = newPendingDeletes(segmentInfo);
+    pendingSoftDeletes.onNewReader(segmentReader, segmentInfo);
+    assertEquals(1, pendingSoftDeletes.numPendingDeletes());
+    assertFalse(pendingSoftDeletes.getLiveDocs().get(0));
+    assertFalse(pendingSoftDeletes.getLiveDocs().get(1));
+    assertTrue(pendingSoftDeletes.getLiveDocs().get(2));
+    IOUtils.close(reader, writer, dir);
+  }
+
+  public void testApplyUpdates() throws IOException {
+    RAMDirectory dir = new RAMDirectory();
+    SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 10, false, Codec.getDefault(),
+        Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
+    SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, 0, 0, 0);
+    PendingSoftDeletes deletes = newPendingDeletes(commitInfo);
+    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, 0, Collections.emptyMap(), 0, 0);
+    List<Integer> docsDeleted = Arrays.asList(1, 3, 7, 8, DocIdSetIterator.NO_MORE_DOCS);
+    List<DocValuesFieldUpdates> updates = Arrays.asList(singleUpdate(docsDeleted, 10));
+    deletes.onDocValuesUpdate(fieldInfo, updates);
+    assertEquals(4, deletes.numPendingDeletes());
+    assertTrue(deletes.getLiveDocs().get(0));
+    assertFalse(deletes.getLiveDocs().get(1));
+    assertTrue(deletes.getLiveDocs().get(2));
+    assertFalse(deletes.getLiveDocs().get(3));
+    assertTrue(deletes.getLiveDocs().get(4));
+    assertTrue(deletes.getLiveDocs().get(5));
+    assertTrue(deletes.getLiveDocs().get(6));
+    assertFalse(deletes.getLiveDocs().get(7));
+    assertFalse(deletes.getLiveDocs().get(8));
+    assertTrue(deletes.getLiveDocs().get(9));
+
+    docsDeleted = Arrays.asList(1, 2, DocIdSetIterator.NO_MORE_DOCS);
+    updates = Arrays.asList(singleUpdate(docsDeleted, 10));
+    fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, 1, Collections.emptyMap(), 0, 0);
+    deletes.onDocValuesUpdate(fieldInfo, updates);
+    assertEquals(5, deletes.numPendingDeletes());
+    assertTrue(deletes.getLiveDocs().get(0));
+    assertFalse(deletes.getLiveDocs().get(1));
+    assertFalse(deletes.getLiveDocs().get(2));
+    assertFalse(deletes.getLiveDocs().get(3));
+    assertTrue(deletes.getLiveDocs().get(4));
+    assertTrue(deletes.getLiveDocs().get(5));
+    assertTrue(deletes.getLiveDocs().get(6));
+    assertFalse(deletes.getLiveDocs().get(7));
+    assertFalse(deletes.getLiveDocs().get(8));
+    assertTrue(deletes.getLiveDocs().get(9));
+  }
+
+  public void testUpdateAppliedOnlyOnce() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); // no soft-deletes field configured here
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "1"), doc,
+        new NumericDocValuesField("_soft_deletes", 1));
+    doc = new Document();
+    doc.add(new StringField("id", "2", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "2"), doc,
+        new NumericDocValuesField("_soft_deletes", 1));
+    doc = new Document();
+    doc.add(new StringField("id", "2", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "2"), doc,
+        new NumericDocValuesField("_soft_deletes", 1));
+    writer.commit();
+    DirectoryReader reader = writer.getReader();
+    assertEquals(1, reader.leaves().size());
+    SegmentReader segmentReader = (SegmentReader) reader.leaves().get(0).reader();
+    SegmentCommitInfo segmentInfo = segmentReader.getSegmentInfo();
+    SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 3, false, Codec.getDefault(),
+        Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
+    PendingSoftDeletes deletes = newPendingDeletes(segmentInfo);
+    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, segmentInfo.getDocValuesGen(), Collections.emptyMap(), 0, 0);
+    List<Integer> docsDeleted = Arrays.asList(1, DocIdSetIterator.NO_MORE_DOCS);
+    List<DocValuesFieldUpdates> updates = Arrays.asList(singleUpdate(docsDeleted, 3));
+    deletes.onDocValuesUpdate(fieldInfo, updates);
+    assertEquals(1, deletes.numPendingDeletes());
+    assertTrue(deletes.getLiveDocs().get(0));
+    assertFalse(deletes.getLiveDocs().get(1));
+    assertTrue(deletes.getLiveDocs().get(2));
+    deletes.liveDocsShared();
+    Bits liveDocs = deletes.getLiveDocs();
+    deletes.onNewReader(segmentReader, segmentInfo);
+    // no changes; we don't apply updates twice
+    assertSame(liveDocs, deletes.getLiveDocs());
+    assertTrue(deletes.getLiveDocs().get(0));
+    assertFalse(deletes.getLiveDocs().get(1));
+    assertTrue(deletes.getLiveDocs().get(2));
+    assertEquals(1, deletes.numPendingDeletes());
+    IOUtils.close(reader, writer, dir);
+  }
+
+  private DocValuesFieldUpdates singleUpdate(List<Integer> docsDeleted, int maxDoc) {
+    return new DocValuesFieldUpdates(maxDoc, 0, "_soft_deletes", DocValuesType.NUMERIC) {
+      @Override
+      public void add(int doc, Object value) {
+      }
+
+      @Override
+      public Iterator iterator() {
+        return new Iterator() {
+          java.util.Iterator<Integer> iter = docsDeleted.iterator();
+          int doc = -1;
+
+          @Override
+          int nextDoc() {
+            return doc = iter.next();
+          }
+
+          @Override
+          int doc() {
+            return doc;
+          }
+
+          @Override
+          Object value() {
+            return 1;
+          }
+
+          @Override
+          long delGen() {
+            return 0;
+          }
+        };
+      }
+
+      @Override
+      public void finish() {
+      }
+
+      @Override
+      public boolean any() {
+        return true;
+      }
+
+      @Override
+      public long ramBytesUsed() {
+        return 0;
+      }
+
+      @Override
+      public int size() {
+        return 1;
+      }
+    };
+  }
+}
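
Conceptually, the class under test derives live docs from the soft-deletes doc-values field along these lines (an illustrative sketch, not the actual `PendingSoftDeletes` source):

[source,java]
----
import java.io.IOException;

import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;

class SoftDeletesLiveDocsSketch {
  static FixedBitSet liveDocsFromSoftDeletes(NumericDocValues softDeletes, int maxDoc) throws IOException {
    FixedBitSet liveDocs = new FixedBitSet(maxDoc);
    liveDocs.set(0, maxDoc); // start with every document live
    // A document that has any value in the soft-deletes field counts as deleted.
    for (int doc = softDeletes.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = softDeletes.nextDoc()) {
      liveDocs.clear(doc);
    }
    return liveDocs;
  }
}
----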


[29/50] lucene-solr:jira/solr-12181: SOLR-10616: add 'java-javadocs' as a variable in the ref-guide, and clean up some overly specific mentions of 'Java 8'

Posted by ab...@apache.org.
SOLR-10616: add 'java-javadocs' as a variable in the ref-guide, and clean up some overly specific mentions of 'Java 8'

Continuation of SOLR-12118


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9009fe63
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9009fe63
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9009fe63

Branch: refs/heads/jira/solr-12181
Commit: 9009fe6378c8f3fe1757ef744114c3e558919a68
Parents: 4137f32
Author: Chris Hostetter <ho...@apache.org>
Authored: Thu Apr 5 11:36:24 2018 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Thu Apr 5 11:36:24 2018 -0700

----------------------------------------------------------------------
 solr/solr-ref-guide/build.xml                           |  1 +
 solr/solr-ref-guide/src/_config.yml.template            |  1 +
 solr/solr-ref-guide/src/language-analysis.adoc          | 12 ++++++------
 solr/solr-ref-guide/src/solr-jdbc-python-jython.adoc    |  4 ++--
 solr/solr-ref-guide/src/solr-jdbc-r.adoc                |  2 +-
 solr/solr-ref-guide/src/tokenizers.adoc                 |  6 +++---
 ...ed-data-store-data-with-the-data-import-handler.adoc |  4 ++--
 solr/solr-ref-guide/src/working-with-dates.adoc         |  4 ++--
 8 files changed, 18 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/build.xml
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/build.xml b/solr/solr-ref-guide/build.xml
index 745a2a5..92c236a 100644
--- a/solr/solr-ref-guide/build.xml
+++ b/solr/solr-ref-guide/build.xml
@@ -230,6 +230,7 @@
         <attribute key="solr-guide-draft-status" value="${solr-guide-draft-status}" />
         <attribute key="solr-guide-version" value="${solr-guide-version}" />
         <attribute key="solr-docs-version" value="${solr-docs-version}" />
+        <attribute key="java-javadocs" value="${javadoc.link}" />
         <attribute key="solr-javadocs" value="@{solr-javadocs}" />
         <attribute key="lucene-javadocs" value="@{lucene-javadocs}" />
         <attribute key="build-date" value="${DSTAMP}" />

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/src/_config.yml.template
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/_config.yml.template b/solr/solr-ref-guide/src/_config.yml.template
index f789b8c..1c55dc4 100755
--- a/solr/solr-ref-guide/src/_config.yml.template
+++ b/solr/solr-ref-guide/src/_config.yml.template
@@ -74,6 +74,7 @@ solr-attributes: &solr-attributes-ref
   solr-guide-version: "${solr-guide-version}"
   solr-guide-version-path: "${solr-guide-version-path}"
   solr-docs-version: "${solr-docs-version}"
+  java-javadocs: "${javadoc.link}"
   solr-javadocs: "${html-solr-javadocs}"
   lucene-javadocs: "${html-lucene-javadocs}"
   build-date: "${DSTAMP}"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/src/language-analysis.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/language-analysis.adoc b/solr/solr-ref-guide/src/language-analysis.adoc
index ef476d7..10dd264 100644
--- a/solr/solr-ref-guide/src/language-analysis.adoc
+++ b/solr/solr-ref-guide/src/language-analysis.adoc
@@ -274,7 +274,7 @@ The principles of JDK Collation are the same as those of ICU Collation; you just
 
 *Arguments for `solr.CollationField`, specified as attributes within the `<fieldtype>` element:*
 
-Using a System collator (see http://www.oracle.com/technetwork/java/javase/java8locales-2095355.html[Oracle's list of locales supported in Java 8]):
+Using a System collator (see http://www.oracle.com/technetwork/java/javase/java8locales-2095355.html[Oracle's list of locales supported in Java]):
 
 `language`:: (required) http://www.loc.gov/standards/iso639-2/php/code_list.php[ISO-639] language code
 
@@ -282,17 +282,17 @@ Using a System collator (see http://www.oracle.com/technetwork/java/javase/java8
 
 `variant`:: Vendor or browser-specific code
 
-`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`. See http://docs.oracle.com/javase/8/docs/api/java/text/Collator.html[Oracle Java 8 Collator javadocs] for more information.
+`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
-`decomposition`:: Valid values are `no`, `canonical`, or `full`. See http://docs.oracle.com/javase/8/docs/api/java/text/Collator.html[Oracle Java 8 Collator javadocs] for more information.
+`decomposition`:: Valid values are `no`, `canonical`, or `full`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
 Using a Tailored ruleset:
 
-`custom`:: (required) Path to a UTF-8 text file containing rules supported by the http://docs.oracle.com/javase/8/docs/api/java/text/RuleBasedCollator.html[`JDK RuleBasedCollator`]
+`custom`:: (required) Path to a UTF-8 text file containing rules supported by the {java-javadocs}java/text/RuleBasedCollator.html[`JDK RuleBasedCollator`]
 
-`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`. See http://docs.oracle.com/javase/8/docs/api/java/text/Collator.html[Oracle Java 8 Collator javadocs] for more information.
+`strength`:: Valid values are `primary`, `secondary`, `tertiary` or `identical`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
-`decomposition`:: Valid values are `no`, `canonical`, or `full`. See http://docs.oracle.com/javase/8/docs/api/java/text/Collator.html[Oracle Java 8 Collator javadocs] for more information.
+`decomposition`:: Valid values are `no`, `canonical`, or `full`. See {java-javadocs}java/text/Collator.html[Java Collator javadocs] for more information.
 
 .A `solr.CollationField` example:
 [source,xml]

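The `strength` and `decomposition` attributes above map directly onto the JDK collator API; in plain Java terms (an illustration only; Solr performs this wiring inside `solr.CollationField`):

[source,java]
----
import java.text.Collator;
import java.util.Locale;

public class CollatorDemo {
  public static void main(String[] args) {
    Collator collator = Collator.getInstance(new Locale("de", "DE")); // language + country
    collator.setStrength(Collator.PRIMARY);                           // strength="primary"
    collator.setDecomposition(Collator.CANONICAL_DECOMPOSITION);      // decomposition="canonical"
    // At primary strength, case and accent differences are ignored,
    // so this typically prints 0 under the German collator.
    System.out.println(collator.compare("Müller", "muller"));
  }
}
----
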
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/src/solr-jdbc-python-jython.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solr-jdbc-python-jython.adoc b/solr/solr-ref-guide/src/solr-jdbc-python-jython.adoc
index 26f4a87..55f37de 100644
--- a/solr/solr-ref-guide/src/solr-jdbc-python-jython.adoc
+++ b/solr/solr-ref-guide/src/solr-jdbc-python-jython.adoc
@@ -29,7 +29,7 @@ Python supports accessing JDBC using the https://pypi.python.org/pypi/JayDeBeApi
 [source,bash]
 ----
 #!/usr/bin/env bash
-# Java 8 must already be installed
+# Java must already be installed
 
 pip install JayDeBeApi
 
@@ -70,7 +70,7 @@ Jython supports accessing JDBC natively with Java interfaces or with the zxJDBC
 [source,bash]
 ----
 #!/usr/bin/env bash
-# Java 8 and Jython must already be installed
+# Java and Jython must already be installed
 
 export CLASSPATH="$(echo $(ls /opt/solr/dist/solr-solrj* /opt/solr/dist/solrj-lib/*) | tr ' ' ':')"
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/src/solr-jdbc-r.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solr-jdbc-r.adoc b/solr/solr-ref-guide/src/solr-jdbc-r.adoc
index e43dde4..9037135 100644
--- a/solr/solr-ref-guide/src/solr-jdbc-r.adoc
+++ b/solr/solr-ref-guide/src/solr-jdbc-r.adoc
@@ -25,7 +25,7 @@ R supports accessing JDBC using the https://www.rforge.net/RJDBC/[RJDBC] library
 ----
 #!/usr/bin/env bash
 
-# Java 8 must already be installed and R configured with `R CMD javareconf`
+# Java must already be installed and R configured with `R CMD javareconf`
 
 Rscript -e 'install.packages("RJDBC", dep=TRUE)'
 Rscript solr_rjdbc.R

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/src/tokenizers.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/tokenizers.adoc b/solr/solr-ref-guide/src/tokenizers.adoc
index a351499..b335239 100644
--- a/solr/solr-ref-guide/src/tokenizers.adoc
+++ b/solr/solr-ref-guide/src/tokenizers.adoc
@@ -323,7 +323,7 @@ This tokenizer creates synonyms from file path hierarchies.
 
 This tokenizer uses a Java regular expression to break the input text stream into tokens. The expression provided by the pattern argument can be interpreted either as a delimiter that separates tokens, or to match patterns that should be extracted from the text as tokens.
 
-See http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[the Javadocs for `java.util.regex.Pattern`] for more information on Java regular expression syntax.
+See {java-javadocs}java/util/regex/Pattern.html[the Javadocs for `java.util.regex.Pattern`] for more information on Java regular expression syntax.
 
 *Factory class:* `solr.PatternTokenizerFactory`
 
@@ -470,7 +470,7 @@ Simple tokenizer that splits the text stream on whitespace and returns sequences
 `rule`::
 Specifies how to define whitespace for the purpose of tokenization. Valid values:
 
-* `java`: (Default) Uses https://docs.oracle.com/javase/8/docs/api/java/lang/Character.html#isWhitespace-int-[Character.isWhitespace(int)]
+* `java`: (Default) Uses {java-javadocs}java/lang/Character.html#isWhitespace-int-[Character.isWhitespace(int)]
 * `unicode`: Uses Unicode's WHITESPACE property
 
 *Example:*
@@ -488,4 +488,4 @@ Specifies how to define whitespace for the purpose of tokenization. Valid values
 
 == OpenNLP Tokenizer and OpenNLP Filters
 
-See <<language-analysis.adoc#opennlp-integration,OpenNLP Integration>> for information about using the OpenNLP Tokenizer, along with information about available OpenNLP token filters.
\ No newline at end of file
+See <<language-analysis.adoc#opennlp-integration,OpenNLP Integration>> for information about using the OpenNLP Tokenizer, along with information about available OpenNLP token filters.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc b/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
index c4eaad8..fdcfe5a 100644
--- a/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
+++ b/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
@@ -868,7 +868,7 @@ DateFormatTransformer applies only on the fields with an attribute `dateTimeForm
 This transformer recognizes the following attributes:
 
 dateTimeFormat::
-The format used for parsing this field. This must comply with the syntax of the http://docs.oracle.com/javase/8/docs/api/java/text/SimpleDateFormat.html[Java SimpleDateFormat] class.
+The format used for parsing this field. This must comply with the syntax of the {java-javadocs}java/text/SimpleDateFormat.html[Java SimpleDateFormat] class.
 
 sourceColName::
 The column on which the dateFormat is to be applied. If this is absent source and target are same.
@@ -993,7 +993,7 @@ Note that this transformer can be used to either split a string into tokens base
 
 === The ScriptTransformer
 
-The script transformer allows arbitrary transformer functions to be written in any scripting language supported by Java, such as Javascript, JRuby, Jython, Groovy, or BeanShell. Javascript is integrated into Java 8; you'll need to integrate other languages yourself.
+The script transformer allows arbitrary transformer functions to be written in any scripting language supported by Java, such as Javascript, JRuby, Jython, Groovy, or BeanShell. Javascript is integrated into Java by default; you'll need to integrate other languages yourself.
 
 Each function you write must accept a row variable (which corresponds to a `Java Map<String,Object>`, thus permitting `get,put,remove` operations). Thus you can modify the value of an existing field or add new fields. The return value of the function is the returned object.
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9009fe63/solr/solr-ref-guide/src/working-with-dates.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/working-with-dates.adoc b/solr/solr-ref-guide/src/working-with-dates.adoc
index ae3b737..d5f3203 100644
--- a/solr/solr-ref-guide/src/working-with-dates.adoc
+++ b/solr/solr-ref-guide/src/working-with-dates.adoc
@@ -18,7 +18,7 @@
 
 == Date Formatting
 
-Solr's date fields (`DatePointField`, `DateRangeField` and the deprecated `TrieDateField`) represent "dates" as a point in time with millisecond precision. The format used is a restricted form of the canonical representation of dateTime in the http://www.w3.org/TR/xmlschema-2/#dateTime[XML Schema specification] – a restricted subset of https://en.wikipedia.org/wiki/ISO_8601[ISO-8601]. For those familiar with Java 8, Solr uses https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html#ISO_INSTANT[DateTimeFormatter.ISO_INSTANT] for formatting, and parsing too with "leniency".
+Solr's date fields (`DatePointField`, `DateRangeField` and the deprecated `TrieDateField`) represent "dates" as a point in time with millisecond precision. The format used is a restricted form of the canonical representation of dateTime in the http://www.w3.org/TR/xmlschema-2/#dateTime[XML Schema specification] – a restricted subset of https://en.wikipedia.org/wiki/ISO_8601[ISO-8601]. For those familiar with Java date handling, Solr uses {java-javadocs}java/time/format/DateTimeFormatter.html#ISO_INSTANT[DateTimeFormatter.ISO_INSTANT] for formatting, and also for parsing (with "leniency").
 
 `YYYY-MM-DDThh:mm:ssZ`
 
@@ -110,7 +110,7 @@ Example:
 
 ==== TZ
 
-By default, all date math expressions are evaluated relative to the UTC TimeZone, but the `TZ` parameter can be specified to override this behaviour, by forcing all date based addition and rounding to be relative to the specified http://docs.oracle.com/javase/8/docs/api/java/util/TimeZone.html[time zone].
+By default, all date math expressions are evaluated relative to the UTC TimeZone, but the `TZ` parameter can be specified to override this behaviour by forcing all date-based addition and rounding to be relative to the specified {java-javadocs}java/util/TimeZone.html[time zone].
 
 For example, the following request will use range faceting to facet over the current month, "per day" relative UTC:
 

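As a plain-JDK illustration (unrelated to Solr internals) of the `DateTimeFormatter.ISO_INSTANT` behaviour referenced above:

[source,java]
----
import java.time.Instant;
import java.time.format.DateTimeFormatter;

public class IsoInstantDemo {
  public static void main(String[] args) {
    // Instant.parse uses DateTimeFormatter.ISO_INSTANT under the hood.
    Instant t = Instant.parse("2018-04-05T11:36:24Z");
    // Formatting round-trips to the canonical Z-terminated form.
    System.out.println(DateTimeFormatter.ISO_INSTANT.format(t)); // 2018-04-05T11:36:24Z
  }
}
----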

[14/50] lucene-solr:jira/solr-12181: SOLR-12136: highlighting.adoc: Add links and clarify "hl.fl" must refer to stored fields.

Posted by ab...@apache.org.
SOLR-12136: highlighting.adoc: Add links and clarify "hl.fl" must refer to stored fields.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8b3fc53e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8b3fc53e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8b3fc53e

Branch: refs/heads/jira/solr-12181
Commit: 8b3fc53e6e75ecc8153ad9a8f25b70169f422c7a
Parents: 508476e
Author: David Smiley <ds...@apache.org>
Authored: Thu Apr 5 11:36:10 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Thu Apr 5 11:36:10 2018 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/highlighting.adoc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b3fc53e/solr/solr-ref-guide/src/highlighting.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/highlighting.adoc b/solr/solr-ref-guide/src/highlighting.adoc
index 1990c6c..2a832ee 100644
--- a/solr/solr-ref-guide/src/highlighting.adoc
+++ b/solr/solr-ref-guide/src/highlighting.adoc
@@ -36,7 +36,7 @@ The highlighting implementation to use. Acceptable values are: `unified`, `origi
 See the <<Choosing a Highlighter>> section below for more details on the differences between the available highlighters.
 
 `hl.fl`::
-Specifies a list of fields to highlight, either comma- or space-delimited.
+Specifies a list of fields to highlight, either comma- or space-delimited.  These must be "stored".
 A wildcard of `\*` (asterisk) can be used to match field globs, such as `text_*` or even `\*` to highlight on all fields where highlighting is possible.
 When using `*`, consider adding `hl.requireFieldMatch=true`.
 +
@@ -55,7 +55,7 @@ When setting this, you might also need to set `hl.qparser`.
 The default is the value of the `q` parameter (already parsed).
 
 `hl.qparser`::
-The query parser to use for the `hl.q` query.  It only applies when `hl.q` is set.
+The <<query-syntax-and-parsing.adoc#query-syntax-and-parsing,query parser>> to use for the `hl.q` query.  It only applies when `hl.q` is set.
 +
 The default is the value of the `defType` parameter which in turn defaults to `lucene`.
 
@@ -141,7 +141,7 @@ The `highlighting` section includes the ID of each document, and the field that
 
 == Choosing a Highlighter
 
-Solr provides a `HighlightComponent` (a `SearchComponent`) and it's in the default list of components for search handlers. It offers a somewhat unified API over multiple actual highlighting implementations (or simply "highlighters") that do the business of highlighting.
+Solr provides a `HighlightComponent` (a <<requesthandlers-and-searchcomponents-in-solrconfig.adoc#search-components,`SearchComponent`>>) and it's in the default list of components for search handlers. It offers a somewhat unified API over multiple actual highlighting implementations (or simply "highlighters") that do the business of highlighting.
 
 There are many parameters supported by more than one highlighter, and sometimes the implementation details and semantics will be a bit different, so don't expect identical results when switching highlighters. You should use the `hl.method` parameter to choose a highlighter but it's also possible to explicitly configure an implementation by class name in `solrconfig.xml`.
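
Via SolrJ, the parameters documented on this page are set like any other request parameter; a hedged illustration (the query string and field names are made up):

[source,java]
----
import org.apache.solr.client.solrj.SolrQuery;

public class HighlightParamsDemo {
  public static void main(String[] args) {
    SolrQuery q = new SolrQuery("features:lucene");
    q.setHighlight(true);          // hl=true enables the HighlightComponent
    q.set("hl.method", "unified"); // pick a highlighter implementation
    q.set("hl.fl", "features");    // must name stored field(s)
    System.out.println(q);
  }
}
----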
 


[15/50] lucene-solr:jira/solr-12181: LUCENE-8239: Identify the situation where the travel and test point plane envelopes are off the ellipsoid and avoid them.

Posted by ab...@apache.org.
LUCENE-8239: Identify the situation where the travel and test point plane envelopes are off the ellipsoid and avoid them.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9b03f8c0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9b03f8c0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9b03f8c0

Branch: refs/heads/jira/solr-12181
Commit: 9b03f8c033e15954f4d9d1a3962cc0695d2d762d
Parents: 508476e
Author: Karl Wright <Da...@gmail.com>
Authored: Thu Apr 5 12:07:07 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Thu Apr 5 12:07:07 2018 -0400

----------------------------------------------------------------------
 .../spatial3d/geom/GeoComplexPolygon.java       | 380 ++++++++++++-------
 .../lucene/spatial3d/geom/GeoPolygonTest.java   |   1 -
 2 files changed, 234 insertions(+), 147 deletions(-)
----------------------------------------------------------------------
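
The guard this patch repeats for each axis, for both the test-point and travel planes, boils down to the following check (names follow the diff; a distilled sketch of the Y-axis case rather than the exact source):

[source,java]
----
import org.apache.lucene.spatial3d.geom.Plane;
import org.apache.lucene.spatial3d.geom.PlanetModel;
import org.apache.lucene.spatial3d.geom.Vector;

class EnvelopeGuard {
  /** Returns the offset envelope plane, or null when it cannot intersect the ellipsoid. */
  static Plane envelopePlaneOrNull(PlanetModel planetModel, Plane basePlane, boolean above) {
    Plane offset = new Plane(basePlane, above); // plane shifted by MINIMUM_RESOLUTION
    if (offset.D - planetModel.getMaximumYValue() >= Vector.MINIMUM_RESOLUTION
        || planetModel.getMinimumYValue() - offset.D >= Vector.MINIMUM_RESOLUTION) {
      return null; // the shifted plane lies entirely off the planet, so skip it
    }
    return offset;
  }
}
----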


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9b03f8c0/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
index d5d005e..d11fb79 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
@@ -80,12 +80,41 @@ class GeoComplexPolygon extends GeoBasePolygon {
     this.testPointFixedXPlane = new Plane(1.0, 0.0, 0.0, -testPoint.x);
     this.testPointFixedZPlane = new Plane(0.0, 0.0, 1.0, -testPoint.z);
     
-    this.testPointFixedYAbovePlane = new Plane(testPointFixedYPlane, true);
-    this.testPointFixedYBelowPlane = new Plane(testPointFixedYPlane, false);
-    this.testPointFixedXAbovePlane = new Plane(testPointFixedXPlane, true);
-    this.testPointFixedXBelowPlane = new Plane(testPointFixedXPlane, false);
-    this.testPointFixedZAbovePlane = new Plane(testPointFixedZPlane, true);
-    this.testPointFixedZBelowPlane = new Plane(testPointFixedZPlane, false);
+    Plane fixedYAbovePlane = new Plane(testPointFixedYPlane, true);
+    if (fixedYAbovePlane.D - planetModel.getMaximumYValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumYValue() - fixedYAbovePlane.D >= Vector.MINIMUM_RESOLUTION) {
+        fixedYAbovePlane = null;
+    }
+    this.testPointFixedYAbovePlane = fixedYAbovePlane;
+    
+    Plane fixedYBelowPlane = new Plane(testPointFixedYPlane, false);
+    if (fixedYBelowPlane.D - planetModel.getMaximumYValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumYValue() - fixedYBelowPlane.D >= Vector.MINIMUM_RESOLUTION) {
+        fixedYBelowPlane = null;
+    }
+    this.testPointFixedYBelowPlane = fixedYBelowPlane;
+    
+    Plane fixedXAbovePlane = new Plane(testPointFixedXPlane, true);
+    if (fixedXAbovePlane.D - planetModel.getMaximumXValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumXValue() - fixedXAbovePlane.D >= Vector.MINIMUM_RESOLUTION) {
+        fixedXAbovePlane = null;
+    }
+    this.testPointFixedXAbovePlane = fixedXAbovePlane;
+    
+    Plane fixedXBelowPlane = new Plane(testPointFixedXPlane, false);
+    if (fixedXBelowPlane.D - planetModel.getMaximumXValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumXValue() - fixedXBelowPlane.D >= Vector.MINIMUM_RESOLUTION) {
+        fixedXBelowPlane = null;
+    }
+    this.testPointFixedXBelowPlane = fixedXBelowPlane;
+    
+    Plane fixedZAbovePlane = new Plane(testPointFixedZPlane, true);
+    if (fixedZAbovePlane.D - planetModel.getMaximumZValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumZValue() - fixedZAbovePlane.D >= Vector.MINIMUM_RESOLUTION) {
+        fixedZAbovePlane = null;
+    }
+    this.testPointFixedZAbovePlane = fixedZAbovePlane;
+    
+    Plane fixedZBelowPlane = new Plane(testPointFixedZPlane, false);
+    if (fixedZBelowPlane.D - planetModel.getMaximumZValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumZValue() - fixedZBelowPlane.D >= Vector.MINIMUM_RESOLUTION) {
+        fixedZBelowPlane = null;
+    }
+    this.testPointFixedZBelowPlane = fixedZBelowPlane;
 
     this.edgePoints = new GeoPoint[pointsList.size()];
     this.shapeStartEdges = new Edge[pointsList.size()];
@@ -193,7 +222,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
       }
       return ((crossingEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet;
     } else {
-      
+
       // This is the expensive part!!
       // Changing the code below has an enormous impact on the queries per second we see with the benchmark.
       
@@ -202,13 +231,37 @@ class GeoComplexPolygon extends GeoBasePolygon {
       final Plane travelPlaneFixedY = new Plane(0.0, 1.0, 0.0, -y);
       final Plane travelPlaneFixedZ = new Plane(0.0, 0.0, 1.0, -z);
 
+      Plane fixedYAbovePlane = new Plane(travelPlaneFixedY, true);
+      if (fixedYAbovePlane.D - planetModel.getMaximumYValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumYValue() - fixedYAbovePlane.D >= Vector.MINIMUM_RESOLUTION) {
+          fixedYAbovePlane = null;
+      }
+      
+      Plane fixedYBelowPlane = new Plane(travelPlaneFixedY, false);
+      if (fixedYBelowPlane.D - planetModel.getMaximumYValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumYValue() - fixedYBelowPlane.D >= Vector.MINIMUM_RESOLUTION) {
+          fixedYBelowPlane = null;
+      }
+      
+      Plane fixedXAbovePlane = new Plane(travelPlaneFixedX, true);
+      if (fixedXAbovePlane.D - planetModel.getMaximumXValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumXValue() - fixedXAbovePlane.D >= Vector.MINIMUM_RESOLUTION) {
+          fixedXAbovePlane = null;
+      }
+      
+      Plane fixedXBelowPlane = new Plane(travelPlaneFixedX, false);
+      if (fixedXBelowPlane.D - planetModel.getMaximumXValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumXValue() - fixedXBelowPlane.D >= Vector.MINIMUM_RESOLUTION) {
+          fixedXBelowPlane = null;
+      }
+      
+      Plane fixedZAbovePlane = new Plane(travelPlaneFixedZ, true);
+      if (fixedZAbovePlane.D - planetModel.getMaximumZValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumZValue() - fixedZAbovePlane.D >= Vector.MINIMUM_RESOLUTION) {
+          fixedZAbovePlane = null;
+      }
+      
+      Plane fixedZBelowPlane = new Plane(travelPlaneFixedZ, false);
+      if (fixedZBelowPlane.D - planetModel.getMaximumZValue() >= Vector.MINIMUM_RESOLUTION || planetModel.getMinimumZValue() - fixedZBelowPlane.D >= Vector.MINIMUM_RESOLUTION) {
+          fixedZBelowPlane = null;
+      }
+
       // Find the intersection points for each one of these and the complementary test point planes.
-      final GeoPoint[] XIntersectionsY = travelPlaneFixedX.findIntersections(planetModel, testPointFixedYPlane);
-      final GeoPoint[] XIntersectionsZ = travelPlaneFixedX.findIntersections(planetModel, testPointFixedZPlane);
-      final GeoPoint[] YIntersectionsX = travelPlaneFixedY.findIntersections(planetModel, testPointFixedXPlane);
-      final GeoPoint[] YIntersectionsZ = travelPlaneFixedY.findIntersections(planetModel, testPointFixedZPlane);
-      final GeoPoint[] ZIntersectionsX = travelPlaneFixedZ.findIntersections(planetModel, testPointFixedXPlane);
-      final GeoPoint[] ZIntersectionsY = travelPlaneFixedZ.findIntersections(planetModel, testPointFixedYPlane);
 
       // There will be multiple intersection points found.  We choose the one that has the lowest total distance, as measured in delta X, delta Y, and delta Z.
       double bestDistance = Double.POSITIVE_INFINITY;
@@ -218,154 +271,186 @@ class GeoComplexPolygon extends GeoBasePolygon {
       Plane firstLegAbovePlane = null;
       Plane firstLegBelowPlane = null;
       Plane secondLegPlane = null;
+      Plane secondLegAbovePlane = null;
+      Plane secondLegBelowPlane = null;
       Tree firstLegTree = null;
       Tree secondLegTree = null;
       GeoPoint intersectionPoint = null;
-      
-      for (final GeoPoint p : XIntersectionsY) {
-        // Travel would be in YZ plane (fixed x) then in XZ (fixed y)
-        // We compute distance we need to travel as a placeholder for the number of intersections we might encounter.
-        //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
-        final double tpDelta1 = testPoint.x - p.x;
-        final double tpDelta2 = testPoint.z - p.z;
-        final double cpDelta1 = y - p.y;
-        final double cpDelta2 = z - p.z;
-        final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
-        //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - p.z) * (thePoint.z - p.z);
-        //final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.y - p.y);
-        if (newDistance < bestDistance) {
-          bestDistance = newDistance;
-          firstLegValue = testPoint.y;
-          secondLegValue = x;
-          firstLegPlane = testPointFixedYPlane;
-          firstLegAbovePlane = testPointFixedYAbovePlane;
-          firstLegBelowPlane = testPointFixedYBelowPlane;
-          secondLegPlane = travelPlaneFixedX;
-          firstLegTree = yTree;
-          secondLegTree = xTree;
-          intersectionPoint = p;
+
+      if (testPointFixedYAbovePlane != null && testPointFixedYBelowPlane != null && fixedXAbovePlane != null && fixedXBelowPlane != null) {
+        final GeoPoint[] XIntersectionsY = travelPlaneFixedX.findIntersections(planetModel, testPointFixedYPlane);
+        for (final GeoPoint p : XIntersectionsY) {
+          // Travel would be in YZ plane (fixed x) then in XZ (fixed y)
+          // We compute distance we need to travel as a placeholder for the number of intersections we might encounter.
+          //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
+          final double tpDelta1 = testPoint.x - p.x;
+          final double tpDelta2 = testPoint.z - p.z;
+          final double cpDelta1 = y - p.y;
+          final double cpDelta2 = z - p.z;
+          final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
+          //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - p.z) * (thePoint.z - p.z);
+          //final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.y - p.y);
+          if (newDistance < bestDistance) {
+            bestDistance = newDistance;
+            firstLegValue = testPoint.y;
+            secondLegValue = x;
+            firstLegPlane = testPointFixedYPlane;
+            firstLegAbovePlane = testPointFixedYAbovePlane;
+            firstLegBelowPlane = testPointFixedYBelowPlane;
+            secondLegPlane = travelPlaneFixedX;
+            secondLegAbovePlane = fixedXAbovePlane;
+            secondLegBelowPlane = fixedXBelowPlane;
+            firstLegTree = yTree;
+            secondLegTree = xTree;
+            intersectionPoint = p;
+          }
         }
       }
-      for (final GeoPoint p : XIntersectionsZ) {
-        // Travel would be in YZ plane (fixed x) then in XY (fixed z)
-        //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
-        final double tpDelta1 = testPoint.x - p.x;
-        final double tpDelta2 = testPoint.y - p.y;
-        final double cpDelta1 = y - p.y;
-        final double cpDelta2 = z - p.z;
-        final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
-        //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - p.y) * (testPoint.y - p.y)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - p.z) * (thePoint.z - p.z);
-        //final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.z - p.z);
-        if (newDistance < bestDistance) {
-          bestDistance = newDistance;
-          firstLegValue = testPoint.z;
-          secondLegValue = x;
-          firstLegPlane = testPointFixedZPlane;
-          firstLegAbovePlane = testPointFixedZAbovePlane;
-          firstLegBelowPlane = testPointFixedZBelowPlane;
-          secondLegPlane = travelPlaneFixedX;
-          firstLegTree = zTree;
-          secondLegTree = xTree;
-          intersectionPoint = p;
+      if (testPointFixedZAbovePlane != null && testPointFixedZBelowPlane != null && fixedXAbovePlane != null && fixedXBelowPlane != null) {
+        final GeoPoint[] XIntersectionsZ = travelPlaneFixedX.findIntersections(planetModel, testPointFixedZPlane);
+        for (final GeoPoint p : XIntersectionsZ) {
+          // Travel would be in YZ plane (fixed x) then in XY (fixed z)
+          //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
+          final double tpDelta1 = testPoint.x - p.x;
+          final double tpDelta2 = testPoint.y - p.y;
+          final double cpDelta1 = y - p.y;
+          final double cpDelta2 = z - p.z;
+          final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
+          //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - p.y) * (testPoint.y - p.y)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.z - p.z) * (thePoint.z - p.z);
+          //final double newDistance = Math.abs(testPoint.x - p.x) + Math.abs(thePoint.z - p.z);
+          if (newDistance < bestDistance) {
+            bestDistance = newDistance;
+            firstLegValue = testPoint.z;
+            secondLegValue = x;
+            firstLegPlane = testPointFixedZPlane;
+            firstLegAbovePlane = testPointFixedZAbovePlane;
+            firstLegBelowPlane = testPointFixedZBelowPlane;
+            secondLegPlane = travelPlaneFixedX;
+            secondLegAbovePlane = fixedXAbovePlane;
+            secondLegBelowPlane = fixedXBelowPlane;
+            firstLegTree = zTree;
+            secondLegTree = xTree;
+            intersectionPoint = p;
+          }
         }
       }
-      for (final GeoPoint p : YIntersectionsX) {
-        // Travel would be in XZ plane (fixed y) then in YZ (fixed x)
-        //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
-        final double tpDelta1 = testPoint.y - p.y;
-        final double tpDelta2 = testPoint.z - p.z;
-        final double cpDelta1 = x - p.x;
-        final double cpDelta2 = z - p.z;
-        final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
-        //final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - p.z) * (thePoint.z - p.z);
-        //final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.x - p.x);
-        if (newDistance < bestDistance) {
-          bestDistance = newDistance;
-          firstLegValue = testPoint.x;
-          secondLegValue = y;
-          firstLegPlane = testPointFixedXPlane;
-          firstLegAbovePlane = testPointFixedXAbovePlane;
-          firstLegBelowPlane = testPointFixedXBelowPlane;
-          secondLegPlane = travelPlaneFixedY;
-          firstLegTree = xTree;
-          secondLegTree = yTree;
-          intersectionPoint = p;
+      if (testPointFixedXAbovePlane != null && testPointFixedXBelowPlane != null && fixedYAbovePlane != null && fixedYBelowPlane != null) {
+        final GeoPoint[] YIntersectionsX = travelPlaneFixedY.findIntersections(planetModel, testPointFixedXPlane);
+        for (final GeoPoint p : YIntersectionsX) {
+          // Travel would be in XZ plane (fixed y) then in YZ (fixed x)
+          //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
+          final double tpDelta1 = testPoint.y - p.y;
+          final double tpDelta2 = testPoint.z - p.z;
+          final double cpDelta1 = x - p.x;
+          final double cpDelta2 = z - p.z;
+          final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
+          //final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - p.z) * (thePoint.z - p.z);
+          //final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.x - p.x);
+          if (newDistance < bestDistance) {
+            bestDistance = newDistance;
+            firstLegValue = testPoint.x;
+            secondLegValue = y;
+            firstLegPlane = testPointFixedXPlane;
+            firstLegAbovePlane = testPointFixedXAbovePlane;
+            firstLegBelowPlane = testPointFixedXBelowPlane;
+            secondLegPlane = travelPlaneFixedY;
+            secondLegAbovePlane = fixedYAbovePlane;
+            secondLegBelowPlane = fixedYBelowPlane;
+            firstLegTree = xTree;
+            secondLegTree = yTree;
+            intersectionPoint = p;
+          }
         }
       }
-      for (final GeoPoint p : YIntersectionsZ) {
-        // Travel would be in XZ plane (fixed y) then in XY (fixed z)
-        //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
-        final double tpDelta1 = testPoint.x - p.x;
-        final double tpDelta2 = testPoint.y - p.y;
-        final double cpDelta1 = x - p.x;
-        final double cpDelta2 = z - p.z;
-        final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
-        //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - p.y) * (testPoint.y - p.y)  + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - p.z) * (thePoint.z - p.z);
-        //final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.z - p.z);
-        if (newDistance < bestDistance) {
-          bestDistance = newDistance;
-          firstLegValue = testPoint.z;
-          secondLegValue = y;
-          firstLegPlane = testPointFixedZPlane;
-          firstLegAbovePlane = testPointFixedZAbovePlane;
-          firstLegBelowPlane = testPointFixedZBelowPlane;
-          secondLegPlane = travelPlaneFixedY;
-          firstLegTree = zTree;
-          secondLegTree = yTree;
-          intersectionPoint = p;
+      if (testPointFixedZAbovePlane != null && testPointFixedZBelowPlane != null && fixedYAbovePlane != null && fixedYBelowPlane != null) {
+        final GeoPoint[] YIntersectionsZ = travelPlaneFixedY.findIntersections(planetModel, testPointFixedZPlane);
+        for (final GeoPoint p : YIntersectionsZ) {
+          // Travel would be in XZ plane (fixed y) then in XY (fixed z)
+          //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
+          final double tpDelta1 = testPoint.x - p.x;
+          final double tpDelta2 = testPoint.y - p.y;
+          final double cpDelta1 = x - p.x;
+          final double cpDelta2 = z - p.z;
+          final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
+          //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.y - p.y) * (testPoint.y - p.y)  + (thePoint.x - p.x) * (thePoint.x - p.x) + (thePoint.z - p.z) * (thePoint.z - p.z);
+          //final double newDistance = Math.abs(testPoint.y - p.y) + Math.abs(thePoint.z - p.z);
+          if (newDistance < bestDistance) {
+            bestDistance = newDistance;
+            firstLegValue = testPoint.z;
+            secondLegValue = y;
+            firstLegPlane = testPointFixedZPlane;
+            firstLegAbovePlane = testPointFixedZAbovePlane;
+            firstLegBelowPlane = testPointFixedZBelowPlane;
+            secondLegPlane = travelPlaneFixedY;
+            secondLegAbovePlane = fixedYAbovePlane;
+            secondLegBelowPlane = fixedYBelowPlane;
+            firstLegTree = zTree;
+            secondLegTree = yTree;
+            intersectionPoint = p;
+          }
         }
       }
-      for (final GeoPoint p : ZIntersectionsX) {
-        // Travel would be in XY plane (fixed z) then in YZ (fixed x)
-        //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
-        final double tpDelta1 = testPoint.y - p.y;
-        final double tpDelta2 = testPoint.z - p.z;
-        final double cpDelta1 = y - p.y;
-        final double cpDelta2 = x - p.x;
-        final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
-        //final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x);
-        //final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.x - p.x);
-        if (newDistance < bestDistance) {
-          bestDistance = newDistance;
-          firstLegValue = testPoint.x;
-          secondLegValue = z;
-          firstLegPlane = testPointFixedXPlane;
-          firstLegAbovePlane = testPointFixedXAbovePlane;
-          firstLegBelowPlane = testPointFixedXBelowPlane;
-          secondLegPlane = travelPlaneFixedZ;
-          firstLegTree = xTree;
-          secondLegTree = zTree;
-          intersectionPoint = p;
+      if (testPointFixedXAbovePlane != null && testPointFixedXBelowPlane != null && fixedZAbovePlane != null && fixedZBelowPlane != null) {
+        final GeoPoint[] ZIntersectionsX = travelPlaneFixedZ.findIntersections(planetModel, testPointFixedXPlane);
+        for (final GeoPoint p : ZIntersectionsX) {
+          // Travel would be in XY plane (fixed z) then in YZ (fixed x)
+          //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
+          final double tpDelta1 = testPoint.y - p.y;
+          final double tpDelta2 = testPoint.z - p.z;
+          final double cpDelta1 = y - p.y;
+          final double cpDelta2 = x - p.x;
+          final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
+          //final double newDistance = (testPoint.y - p.y) * (testPoint.y - p.y) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x);
+          //final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.x - p.x);
+          if (newDistance < bestDistance) {
+            bestDistance = newDistance;
+            firstLegValue = testPoint.x;
+            secondLegValue = z;
+            firstLegPlane = testPointFixedXPlane;
+            firstLegAbovePlane = testPointFixedXAbovePlane;
+            firstLegBelowPlane = testPointFixedXBelowPlane;
+            secondLegPlane = travelPlaneFixedZ;
+            secondLegAbovePlane = fixedZAbovePlane;
+            secondLegBelowPlane = fixedZBelowPlane;
+            firstLegTree = xTree;
+            secondLegTree = zTree;
+            intersectionPoint = p;
+          }
         }
       }
-      for (final GeoPoint p : ZIntersectionsY) {
-        // Travel would be in XY plane (fixed z) then in XZ (fixed y)
-        //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
-        final double tpDelta1 = testPoint.x - p.x;
-        final double tpDelta2 = testPoint.z - p.z;
-        final double cpDelta1 = y - p.y;
-        final double cpDelta2 = x - p.x;
-        final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
-        //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x);
-        //final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.y - p.y);
-        if (newDistance < bestDistance) {
-          bestDistance = newDistance;
-          firstLegValue = testPoint.y;
-          secondLegValue = z;
-          firstLegPlane = testPointFixedYPlane;
-          firstLegAbovePlane = testPointFixedYAbovePlane;
-          firstLegBelowPlane = testPointFixedYBelowPlane;
-          secondLegPlane = travelPlaneFixedZ;
-          firstLegTree = yTree;
-          secondLegTree = zTree;
-          intersectionPoint = p;
+      if (testPointFixedYAbovePlane != null && testPointFixedYBelowPlane != null && fixedZAbovePlane != null && fixedZBelowPlane != null) {
+        final GeoPoint[] ZIntersectionsY = travelPlaneFixedZ.findIntersections(planetModel, testPointFixedYPlane);
+        for (final GeoPoint p : ZIntersectionsY) {
+          // Travel would be in XY plane (fixed z) then in XZ (fixed y)
+          //final double newDistance = p.arcDistance(testPoint) + p.arcDistance(thePoint);
+          final double tpDelta1 = testPoint.x - p.x;
+          final double tpDelta2 = testPoint.z - p.z;
+          final double cpDelta1 = y - p.y;
+          final double cpDelta2 = x - p.x;
+          final double newDistance = tpDelta1 * tpDelta1 + tpDelta2 * tpDelta2 + cpDelta1 * cpDelta1 + cpDelta2 * cpDelta2;
+          //final double newDistance = (testPoint.x - p.x) * (testPoint.x - p.x) + (testPoint.z - p.z) * (testPoint.z - p.z)  + (thePoint.y - p.y) * (thePoint.y - p.y) + (thePoint.x - p.x) * (thePoint.x - p.x);
+          //final double newDistance = Math.abs(testPoint.z - p.z) + Math.abs(thePoint.y - p.y);
+          if (newDistance < bestDistance) {
+            bestDistance = newDistance;
+            firstLegValue = testPoint.y;
+            secondLegValue = z;
+            firstLegPlane = testPointFixedYPlane;
+            firstLegAbovePlane = testPointFixedYAbovePlane;
+            firstLegBelowPlane = testPointFixedYBelowPlane;
+            secondLegPlane = travelPlaneFixedZ;
+            secondLegAbovePlane = fixedZAbovePlane;
+            secondLegBelowPlane = fixedZBelowPlane;
+            firstLegTree = yTree;
+            secondLegTree = zTree;
+            intersectionPoint = p;
+          }
         }
       }
 
       assert bestDistance > 0.0 : "Best distance should not be zero unless on single plane";
       assert bestDistance < Double.POSITIVE_INFINITY : "Couldn't find an intersection point of any kind";
       
-      final DualCrossingEdgeIterator edgeIterator = new DualCrossingEdgeIterator(firstLegPlane, firstLegAbovePlane, firstLegBelowPlane, secondLegPlane, x, y, z, intersectionPoint);
+      final DualCrossingEdgeIterator edgeIterator = new DualCrossingEdgeIterator(firstLegPlane, firstLegAbovePlane, firstLegBelowPlane, secondLegPlane, secondLegAbovePlane, secondLegBelowPlane, x, y, z, intersectionPoint);
       if (!firstLegTree.traverse(edgeIterator, firstLegValue)) {
         return true;
       }
@@ -431,6 +516,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
     final double yDelta = xyzBounds.getMaximumY() - xyzBounds.getMinimumY();
     final double zDelta = xyzBounds.getMaximumZ() - xyzBounds.getMinimumZ();
     // Select the smallest range
+    // Select the smallest range
     if (xDelta <= yDelta && xDelta <= zDelta) {
       // Drill down in x
       return !xTree.traverse(intersector, xyzBounds.getMinimumX(), xyzBounds.getMaximumX());
@@ -962,6 +1048,8 @@ class GeoComplexPolygon extends GeoBasePolygon {
     private final Plane testPointAbovePlane;
     private final Plane testPointBelowPlane;
     private final Plane travelPlane;
+    private final Plane travelAbovePlane;
+    private final Plane travelBelowPlane;
     private final double thePointX;
     private final double thePointY;
     private final double thePointZ;
@@ -990,11 +1078,14 @@ class GeoComplexPolygon extends GeoBasePolygon {
     public int outerCrossingCount = 0;
 
     public DualCrossingEdgeIterator(final Plane testPointPlane, final Plane testPointAbovePlane, final Plane testPointBelowPlane,
-      final Plane travelPlane, final double thePointX, final double thePointY, final double thePointZ, final GeoPoint intersectionPoint) {
+      final Plane travelPlane, final Plane travelAbovePlane, final Plane travelBelowPlane,
+      final double thePointX, final double thePointY, final double thePointZ, final GeoPoint intersectionPoint) {
       this.testPointPlane = testPointPlane;
       this.testPointAbovePlane = testPointAbovePlane;
       this.testPointBelowPlane = testPointBelowPlane;
       this.travelPlane = travelPlane;
+      this.travelAbovePlane = travelAbovePlane;
+      this.travelBelowPlane = travelBelowPlane;
       this.thePointX = thePointX;
       this.thePointY = thePointY;
       this.thePointZ = thePointZ;
@@ -1034,9 +1125,6 @@ class GeoComplexPolygon extends GeoBasePolygon {
         // Figure out which of the above/below planes are inside vs. outside.  To do this,
         // we look for the point that is within the bounds of the testPointPlane and travelPlane.  The two sides that intersected there are the inside
         // borders.
-        final Plane travelAbovePlane = new Plane(travelPlane, true);
-        final Plane travelBelowPlane = new Plane(travelPlane, false);
-        
         // Each of these can generate two solutions.  We need to refine them to generate only one somehow -- the one in the same area of the world as intersectionPoint.
         // Since the travel/testpoint planes have one fixed coordinate, and that is represented by the plane's D value, it should be possible to choose based on the
         // point's coordinates. 
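
Taken together, these hunks do two things: each candidate travel-plane pair is now skipped up front when its above/below envelope planes don't exist (the degenerate null cases), and the winning pair is the one whose intersection point minimizes the sum of squared coordinate deltas along both travel legs. A minimal standalone sketch of that selection heuristic, assuming each leg varies in exactly two coordinates (hypothetical names; the real code works on GeoPoint and Plane instances and also records the matching trees and envelope planes for the winner):

  final class LegSelection {
    // Squared-delta score for one candidate intersection point p: leg 1 runs
    // from the test point to p, leg 2 from p to the check point; each leg
    // contributes the squared deltas of the two coordinates varying along it.
    static double score(double[] testPoint, double[] checkPoint, double[] p,
                        int[] testAxes, int[] checkAxes) {
      double d = 0.0;
      for (int axis : testAxes) {
        double delta = testPoint[axis] - p[axis];
        d += delta * delta;
      }
      for (int axis : checkAxes) {
        double delta = checkPoint[axis] - p[axis];
        d += delta * delta;
      }
      return d;
    }

    // Keep the candidate with the smallest score, mirroring the
    // "if (newDistance < bestDistance)" updates above.
    static double[] pickBest(double[] testPoint, double[] checkPoint,
                             double[][] candidates, int[] testAxes, int[] checkAxes) {
      double bestDistance = Double.POSITIVE_INFINITY;
      double[] best = null;
      for (double[] p : candidates) {
        double newDistance = score(testPoint, checkPoint, p, testAxes, checkAxes);
        if (newDistance < bestDistance) {
          bestDistance = newDistance;
          best = p;
        }
      }
      return best;
    }
  }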

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9b03f8c0/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index fb32471..b5c18cf 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -1429,7 +1429,6 @@ shape:
     assertTrue(intersection == largeIntersection);
   }
 
-  @Ignore
   @Test
   public void testComplexPolygonPlaneOutsideWorld() {
     List<GeoPoint> points = new ArrayList<>();


[33/50] lucene-solr:jira/solr-12181: Ref Guide: fix color definition so monospace links display with color

Posted by ab...@apache.org.
Ref Guide: fix color definition so monospace links display with color


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b2d756c9
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b2d756c9
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b2d756c9

Branch: refs/heads/jira/solr-12181
Commit: b2d756c9f4d8e272f822682eba48d64055368c90
Parents: 73d7410
Author: Cassandra Targett <ct...@apache.org>
Authored: Thu Apr 5 13:42:44 2018 -0500
Committer: Cassandra Targett <ct...@apache.org>
Committed: Fri Apr 6 11:00:09 2018 -0500

----------------------------------------------------------------------
 solr/solr-ref-guide/src/css/customstyles.css | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b2d756c9/solr/solr-ref-guide/src/css/customstyles.css
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/css/customstyles.css b/solr/solr-ref-guide/src/css/customstyles.css
index 8ead55c..9a166c1 100755
--- a/solr/solr-ref-guide/src/css/customstyles.css
+++ b/solr/solr-ref-guide/src/css/customstyles.css
@@ -762,7 +762,7 @@ span.label.label-primary {
 .col-lg-12 .nav li a {background-color: white}
 
 a code {
-    color: ##2156a5;
+    color: #2156a5;
 }
 
 table th code {


[18/50] lucene-solr:jira/solr-12181: Ref Guide: remove unused README that was copied with our theme design and has never been relevant for us

Posted by ab...@apache.org.
Ref Guide: remove unused README that was copied with our theme design and has never been relevant for us


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8e7b1b23
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8e7b1b23
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8e7b1b23

Branch: refs/heads/jira/solr-12181
Commit: 8e7b1b23759cb174fafdcc7c0da5fab070f5b22c
Parents: 2573eac
Author: Cassandra Targett <ct...@apache.org>
Authored: Thu Apr 5 12:39:48 2018 -0500
Committer: Cassandra Targett <ct...@apache.org>
Committed: Thu Apr 5 12:39:48 2018 -0500

----------------------------------------------------------------------
 solr/solr-ref-guide/src/README.md | 3 ---
 1 file changed, 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e7b1b23/solr/solr-ref-guide/src/README.md
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/README.md b/solr/solr-ref-guide/src/README.md
deleted file mode 100755
index 20d66e3..0000000
--- a/solr/solr-ref-guide/src/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Jekyll Documentation theme
-
-Build the site to see the instructions for using it. Or just go here: [http://idratherbewriting.com/documentation-theme-jekyll/](http://idratherbewriting.com/documentation-theme-jekyll/)


[11/50] lucene-solr:jira/solr-12181: LUCENE-7935: Keep md5/sha1 checksums for maven artifacts

Posted by ab...@apache.org.
LUCENE-7935: Keep md5/sha1 checksums for maven artifacts


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/60ae7be4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/60ae7be4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/60ae7be4

Branch: refs/heads/jira/solr-12181
Commit: 60ae7be40786d6f8a5c5c8393875bf986d2b8877
Parents: 2ace16c
Author: Jan Høydahl <ja...@apache.org>
Authored: Thu Apr 5 02:18:04 2018 +0200
Committer: Jan Høydahl <ja...@apache.org>
Committed: Thu Apr 5 02:18:04 2018 +0200

----------------------------------------------------------------------
 dev-tools/scripts/smokeTestRelease.py | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/60ae7be4/dev-tools/scripts/smokeTestRelease.py
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py
index 9316ff4..f68a9b1 100644
--- a/dev-tools/scripts/smokeTestRelease.py
+++ b/dev-tools/scripts/smokeTestRelease.py
@@ -1071,36 +1071,36 @@ def checkIdenticalMavenArtifacts(distFiles, artifacts, version):
                               % (artifact, distFilenames[artifactFilename], project))
 
 def verifyMavenDigests(artifacts):
-  print("    verify Maven artifacts' sha1/sha512 digests...")
+  print("    verify Maven artifacts' md5/sha1 digests...")
   reJarWarPom = re.compile(r'\.(?:[wj]ar|pom)$')
   for project in ('lucene', 'solr'):
     for artifactFile in [a for a in artifacts[project] if reJarWarPom.search(a)]:
+      if artifactFile + '.md5' not in artifacts[project]:
+        raise RuntimeError('missing: MD5 digest for %s' % artifactFile)
       if artifactFile + '.sha1' not in artifacts[project]:
         raise RuntimeError('missing: SHA1 digest for %s' % artifactFile)
-      if artifactFile + '.sha512' not in artifacts[project]:
-        raise RuntimeError('missing: SHA512 digest for %s' % artifactFile)
+      with open(artifactFile + '.md5', encoding='UTF-8') as md5File:
+        md5Expected = md5File.read().strip()
       with open(artifactFile + '.sha1', encoding='UTF-8') as sha1File:
         sha1Expected = sha1File.read().strip()
-      with open(artifactFile + '.sha512', encoding='UTF-8') as sha512File:
-        sha512Expected = sha512File.read().strip()
+      md5 = hashlib.md5()
       sha1 = hashlib.sha1()
-      sha512 = hashlib.sha512()
       inputFile = open(artifactFile, 'rb')
       while True:
         bytes = inputFile.read(65536)
         if len(bytes) == 0:
           break
+        md5.update(bytes)
         sha1.update(bytes)
-        sha512.update(bytes)
       inputFile.close()
+      md5Actual = md5.hexdigest()
       sha1Actual = sha1.hexdigest()
-      sha512Actual = sha512.hexdigest()
+      if md5Actual != md5Expected:
+        raise RuntimeError('MD5 digest mismatch for %s: expected %s but got %s'
+                           % (artifactFile, md5Expected, md5Actual))
       if sha1Actual != sha1Expected:
         raise RuntimeError('SHA1 digest mismatch for %s: expected %s but got %s'
                            % (artifactFile, sha1Expected, sha1Actual))
-      if sha512Actual != sha512Expected:
-        raise RuntimeError('SHA512 digest mismatch for %s: expected %s but got %s'
-                           % (artifactFile, sha512Expected, sha512Actual))
 
 def getPOMcoordinate(treeRoot):
   namespace = '{http://maven.apache.org/POM/4.0.0}'
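
The rewritten loop streams each artifact once, in 64 KB chunks, updating both digests as it goes, and then compares the hex digests against the published .md5/.sha1 sidecar files. The same chunked, single-pass idea in a minimal Java sketch (a hypothetical helper, not part of the build; the script itself uses Python's hashlib):

  import java.io.InputStream;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.security.MessageDigest;

  final class DigestCheck {
    // One pass over the file, 64 KB at a time, like the script's read loop.
    static String hexDigest(Path artifact, String algorithm) throws Exception {
      MessageDigest md = MessageDigest.getInstance(algorithm); // "MD5" or "SHA-1"
      try (InputStream in = Files.newInputStream(artifact)) {
        byte[] buf = new byte[65536];
        int n;
        while ((n = in.read(buf)) != -1) {
          md.update(buf, 0, n);
        }
      }
      StringBuilder sb = new StringBuilder();
      for (byte b : md.digest()) {
        sb.append(String.format("%02x", b));
      }
      return sb.toString();
    }
  }

Comparing hexDigest(path, "MD5") against the trimmed contents of the corresponding .md5 file reproduces the script's mismatch check.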


[49/50] lucene-solr:jira/solr-12181: Merge branch 'master' into jira/solr-12181

Posted by ab...@apache.org.
Merge branch 'master' into jira/solr-12181


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/221f749f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/221f749f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/221f749f

Branch: refs/heads/jira/solr-12181
Commit: 221f749f2d7fa831375e17d52dd7986d59ec98de
Parents: 2f13a21 f83a8da
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Apr 9 16:59:52 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Apr 9 16:59:52 2018 +0200

----------------------------------------------------------------------
 dev-tools/doap/lucene.rdf                       |     7 +
 dev-tools/doap/solr.rdf                         |     7 +
 dev-tools/maven/pom.xml.template                |     5 +
 dev-tools/scripts/smokeTestRelease.py           |    22 +-
 lucene/CHANGES.txt                              |    22 +
 .../miscellaneous/WordDelimiterFilter.java      |    18 +-
 .../miscellaneous/WordDelimiterGraphFilter.java |    17 +-
 .../icu/segmentation/TestICUTokenizerCJK.java   |     2 +
 .../index/TestBackwardsCompatibility.java       |     7 +-
 .../org/apache/lucene/index/index.7.3.0-cfs.zip |   Bin 0 -> 15538 bytes
 .../apache/lucene/index/index.7.3.0-nocfs.zip   |   Bin 0 -> 15548 bytes
 .../org/apache/lucene/index/sorted.7.3.0.zip    |   Bin 0 -> 393834 bytes
 .../lucene/index/BufferedUpdatesStream.java     |    12 +-
 .../lucene/index/FrozenBufferedUpdates.java     |     2 +-
 .../org/apache/lucene/index/IndexWriter.java    |    60 +-
 .../apache/lucene/index/IndexWriterConfig.java  |    29 +
 .../lucene/index/LiveIndexWriterConfig.java     |    12 +
 .../org/apache/lucene/index/MergePolicy.java    |     8 +
 .../apache/lucene/index/MergePolicyWrapper.java |     4 +
 .../org/apache/lucene/index/NoMergePolicy.java  |     7 +-
 .../org/apache/lucene/index/PendingDeletes.java |    97 +-
 .../apache/lucene/index/PendingSoftDeletes.java |   164 +
 .../org/apache/lucene/index/ReaderUtil.java     |     2 -
 .../apache/lucene/index/ReadersAndUpdates.java  |    19 +-
 .../SoftDeletesDirectoryReaderWrapper.java      |   177 +
 .../index/SoftDeletesRetentionMergePolicy.java  |   170 +
 .../lucene/index/StandardDirectoryReader.java   |     4 +-
 .../search/DocValuesFieldExistsQuery.java       |    49 +-
 .../src/java/org/apache/lucene/util/Bits.java   |    10 +-
 .../lucene/codecs/TestCodecLoadingDeadlock.java |    10 +-
 .../org/apache/lucene/index/TestAddIndexes.java |    81 +
 .../lucene/index/TestDirectoryReaderReopen.java |    54 +
 .../apache/lucene/index/TestIndexSorting.java   |     7 +-
 .../apache/lucene/index/TestIndexWriter.java    |   189 +-
 .../lucene/index/TestIndexWriterConfig.java     |     1 +
 .../lucene/index/TestIndexWriterOnDiskFull.java |    11 +-
 .../index/TestIndexingSequenceNumbers.java      |     6 +-
 .../apache/lucene/index/TestMultiFields.java    |    11 +-
 .../apache/lucene/index/TestPendingDeletes.java |    10 +-
 .../lucene/index/TestPendingSoftDeletes.java    |   232 +
 .../TestSoftDeletesDirectoryReaderWrapper.java  |   199 +
 .../TestSoftDeletesRetentionMergePolicy.java    |   312 +
 .../org/apache/lucene/index/TestStressNRT.java  |     9 +-
 .../TestApproximationSearchEquivalence.java     |    30 +-
 .../lucene/search/TestSearcherManager.java      |     2 +-
 .../lucene/search/intervals/IntervalFilter.java |     3 +-
 .../intervals/LowpassIntervalsSource.java       |     3 +
 .../idversion/TestIDVersionPostingsFormat.java  |    28 +-
 .../spatial3d/geom/GeoComplexPolygon.java       |   391 +-
 .../lucene/spatial3d/geom/GeoPathFactory.java   |    22 +-
 .../org/apache/lucene/spatial3d/geom/Plane.java |     2 +-
 .../lucene/spatial3d/geom/GeoPathTest.java      |    23 +
 .../lucene/spatial3d/geom/GeoPolygonTest.java   |    74 +
 .../asserting/AssertingLiveDocsFormat.java      |     2 +-
 .../apache/lucene/index/RandomIndexWriter.java  |    84 +-
 solr/CHANGES.txt                                |    15 +-
 .../solr/response/GeoJSONResponseWriter.java    |     3 +-
 .../solr/response/JSONResponseWriter.java       |     6 +-
 .../java/org/apache/solr/update/UpdateLog.java  |     7 +-
 .../configsets/_default/conf/managed-schema     |     3 +
 .../autoscaling/sim/TestNodeAddedTrigger.java   |     4 +
 .../solr/handler/TestReplicationHandler.java    |     8 +-
 .../apache/solr/response/JSONWriterTest.java    |    24 +-
 .../TestSubQueryTransformerDistrib.java         |    59 +-
 .../solr/uninverting/TestDocTermOrds.java       |    12 +-
 .../configsets/_default/conf/managed-schema     |     3 +
 solr/solr-ref-guide/build.xml                   |     2 +
 solr/solr-ref-guide/src/README.md               |     3 -
 solr/solr-ref-guide/src/_config.yml.template    |     2 +
 solr/solr-ref-guide/src/css/customstyles.css    |     2 +-
 solr/solr-ref-guide/src/highlighting.adoc       |     6 +-
 solr/solr-ref-guide/src/language-analysis.adoc  |    12 +-
 solr/solr-ref-guide/src/learning-to-rank.adoc   |     2 +-
 solr/solr-ref-guide/src/meta-docs/pdf.adoc      |     2 +-
 .../src/rule-based-replica-placement.adoc       |     2 +-
 .../src/solr-jdbc-python-jython.adoc            |     4 +-
 solr/solr-ref-guide/src/solr-jdbc-r.adoc        |     2 +-
 solr/solr-ref-guide/src/tokenizers.adoc         |     6 +-
 .../src/updating-parts-of-documents.adoc        |    45 +-
 .../src/upgrading-a-solr-cluster.adoc           |     2 +-
 ...store-data-with-the-data-import-handler.adoc |     6 +-
 solr/solr-ref-guide/src/working-with-dates.adoc |     4 +-
 .../solrj/io/stream/MathExpressionTest.java     |  4145 +++++++
 .../solrj/io/stream/StreamDecoratorTest.java    |  3954 +++++++
 .../solrj/io/stream/StreamExpressionTest.java   | 10308 ++---------------
 85 files changed, 11652 insertions(+), 9730 deletions(-)
----------------------------------------------------------------------



[43/50] lucene-solr:jira/solr-12181: LUCENE-8222: Await-fix TestICUTokenizerCJK.

Posted by ab...@apache.org.
LUCENE-8222: Await-fix TestICUTokenizerCJK.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6568f3bb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6568f3bb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6568f3bb

Branch: refs/heads/jira/solr-12181
Commit: 6568f3bb59877b063385e318400055fa1907bbc4
Parents: a49543b
Author: Adrien Grand <jp...@gmail.com>
Authored: Mon Apr 9 09:53:22 2018 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Mon Apr 9 09:53:22 2018 +0200

----------------------------------------------------------------------
 .../lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java      | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6568f3bb/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
index 51fb0e6..74a7ca2 100644
--- a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
+++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
@@ -21,10 +21,12 @@ import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 
 /**
  * test ICUTokenizer with dictionary-based CJ segmentation
  */
+@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8222")
 public class TestICUTokenizerCJK extends BaseTokenStreamTestCase {
   Analyzer a;
   


[05/50] lucene-solr:jira/solr-12181: SOLR-12095: Missed a few calls to init().

Posted by ab...@apache.org.
SOLR-12095: Missed a few calls to init().


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2bbd1936
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2bbd1936
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2bbd1936

Branch: refs/heads/jira/solr-12181
Commit: 2bbd19369137d2b31f44c94ce2de61f9047856f4
Parents: ecc17f9
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Wed Apr 4 17:13:24 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Wed Apr 4 17:13:24 2018 +0200

----------------------------------------------------------------------
 .../apache/solr/cloud/autoscaling/sim/TestNodeAddedTrigger.java  | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2bbd1936/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestNodeAddedTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestNodeAddedTrigger.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestNodeAddedTrigger.java
index fd816ca..04e8c1d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestNodeAddedTrigger.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestNodeAddedTrigger.java
@@ -80,6 +80,7 @@ public class TestNodeAddedTrigger extends SimSolrCloudTestCase {
 
     try (NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger")) {
       trigger.configure(cluster.getLoader(), cluster, props);
+      trigger.init();
       trigger.setProcessor(noFirstRunProcessor);
       trigger.run();
 
@@ -250,6 +251,7 @@ public class TestNodeAddedTrigger extends SimSolrCloudTestCase {
     // and assert that the new trigger still fires
     NodeAddedTrigger trigger = new NodeAddedTrigger("node_added_trigger");
     trigger.configure(cluster.getLoader(), cluster, props);
+    trigger.init();
     trigger.setProcessor(noFirstRunProcessor);
     trigger.run();
 
@@ -259,6 +261,7 @@ public class TestNodeAddedTrigger extends SimSolrCloudTestCase {
 
     try (NodeAddedTrigger newTrigger = new NodeAddedTrigger("some_different_name"))  {
       newTrigger.configure(cluster.getLoader(), cluster, props);
+      trigger.init();
       try {
         newTrigger.restoreState(trigger);
         fail("Trigger should only be able to restore state from an old trigger of the same name");
@@ -269,6 +272,7 @@ public class TestNodeAddedTrigger extends SimSolrCloudTestCase {
 
     try (NodeAddedTrigger newTrigger = new NodeAddedTrigger("node_added_trigger"))  {
       newTrigger.configure(cluster.getLoader(), cluster, props);
+      newTrigger.init();
       AtomicBoolean fired = new AtomicBoolean(false);
       AtomicReference<TriggerEvent> eventRef = new AtomicReference<>();
       newTrigger.setProcessor(event -> {


[19/50] lucene-solr:jira/solr-12181: LUCENE-8239: Handle degenerate vector case on linear edge evaluation.

Posted by ab...@apache.org.
LUCENE-8239: Handle degenerate vector case on linear edge evaluation.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/74c2b798
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/74c2b798
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/74c2b798

Branch: refs/heads/jira/solr-12181
Commit: 74c2b798eb5bf02bf161f92c17f94969dba49958
Parents: 27f4772
Author: Karl Wright <Da...@gmail.com>
Authored: Thu Apr 5 13:56:21 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Thu Apr 5 13:56:21 2018 -0400

----------------------------------------------------------------------
 .../apache/lucene/spatial3d/geom/GeoComplexPolygon.java  | 11 ++++++++---
 .../org/apache/lucene/spatial3d/geom/GeoPolygonTest.java |  1 -
 2 files changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/74c2b798/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
index d11fb79..f64755c 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
@@ -195,7 +195,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
     }
     
     // If we're right on top of any of the test planes, we navigate solely on that plane.
-    if (testPointFixedYPlane.evaluateIsZero(x, y, z)) {
+    if (testPointFixedYAbovePlane != null && testPointFixedYBelowPlane != null && testPointFixedYPlane.evaluateIsZero(x, y, z)) {
       // Use the XZ plane exclusively.
       final CountingEdgeIterator crossingEdgeIterator = createLinearCrossingEdgeIterator(testPointFixedYPlane, testPointFixedYAbovePlane, testPointFixedYBelowPlane, x, y, z);
       // Traverse our way from the test point to the check point.  Use the y tree because that's fixed.
@@ -204,7 +204,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
         return true;
       }
       return ((crossingEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet;
-    } else if (testPointFixedXPlane.evaluateIsZero(x, y, z)) {
+    } else if (testPointFixedXAbovePlane != null && testPointFixedXBelowPlane != null && testPointFixedXPlane.evaluateIsZero(x, y, z)) {
       // Use the YZ plane exclusively.
       final CountingEdgeIterator crossingEdgeIterator = createLinearCrossingEdgeIterator(testPointFixedXPlane, testPointFixedXAbovePlane, testPointFixedXBelowPlane, x, y, z);
       // Traverse our way from the test point to the check point.  Use the x tree because that's fixed.
@@ -213,7 +213,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
         return true;
       }
       return ((crossingEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet;
-    } else if (testPointFixedZPlane.evaluateIsZero(x, y, z)) {
+    } else if (testPointFixedZAbovePlane != null && testPointFixedZBelowPlane != null && testPointFixedZPlane.evaluateIsZero(x, y, z)) {
       final CountingEdgeIterator crossingEdgeIterator = createLinearCrossingEdgeIterator(testPointFixedZPlane, testPointFixedZAbovePlane, testPointFixedZBelowPlane, x, y, z);
       // Traverse our way from the test point to the check point.  Use the z tree because that's fixed.
       if (!zTree.traverse(crossingEdgeIterator, testPoint.z)) {
@@ -221,6 +221,8 @@ class GeoComplexPolygon extends GeoBasePolygon {
         return true;
       }
       return ((crossingEdgeIterator.getCrossingCount() & 1) == 0)?testPointInSet:!testPointInSet;
+    } else if (testPointFixedYPlane.evaluateIsZero(x, y, z) || testPointFixedXPlane.evaluateIsZero(x, y, z) || testPointFixedZPlane.evaluateIsZero(x, y, z)) {
+      throw new IllegalArgumentException("Can't compute isWithin for specified point");
     } else {
 
       // This is the expensive part!!
@@ -903,6 +905,9 @@ class GeoComplexPolygon extends GeoBasePolygon {
       this.plane = plane;
       this.abovePlane = abovePlane;
       this.belowPlane = belowPlane;
+      if (plane.isNumericallyIdentical(testPoint)) {
+        throw new IllegalArgumentException("Plane vector identical to testpoint vector");
+      }
       // It doesn't matter which 1/2 of the world we choose, but we must choose only one.
       this.bound = new SidedPlane(plane, testPoint);
       this.thePointX = thePointX;
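
All three fixed-plane branches above end with the same parity test: the number of edge crossings between the test point and the check point decides membership by the even-odd rule. Isolated as a sketch (hypothetical method; in the real code the count comes from the CountingEdgeIterator driven by the tree traversal):

  // Even-odd rule: an even number of boundary crossings means the check point
  // lies on the same side of the polygon's edges as the test point.
  static boolean isWithin(int crossingCount, boolean testPointInSet) {
    return ((crossingCount & 1) == 0) ? testPointInSet : !testPointInSet;
  }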

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/74c2b798/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index b5c18cf..ee15ec4 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -1450,7 +1450,6 @@ shape:
     assertTrue(polygon.isWithin(point1) == largePolygon.isWithin(point1));
   }
 
-  @Ignore
   @Test
   public void testComplexPolygonDegeneratedVector() {
     List<GeoPoint> points = new ArrayList<>();


[48/50] lucene-solr:jira/solr-12181: SOLR-12096: Removing redundant patch file

Posted by ab...@apache.org.
SOLR-12096: Removing redundant patch file


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f83a8da0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f83a8da0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f83a8da0

Branch: refs/heads/jira/solr-12181
Commit: f83a8da05e298395d4b65996d140da0a3343b2d9
Parents: ea08bd3
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Mon Apr 9 17:43:29 2018 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Mon Apr 9 17:43:29 2018 +0530

----------------------------------------------------------------------
 SOLR-12096.patch | 217 --------------------------------------------------
 1 file changed, 217 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f83a8da0/SOLR-12096.patch
----------------------------------------------------------------------
diff --git a/SOLR-12096.patch b/SOLR-12096.patch
deleted file mode 100644
index 9ed1ad7..0000000
--- a/SOLR-12096.patch
+++ /dev/null
@@ -1,217 +0,0 @@
-diff --git a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
-index 43fd7b4..012290e 100644
---- a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
-+++ b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
-@@ -166,7 +166,8 @@ class GeoJSONWriter extends JSONWriter {
- 
-       // SolrDocument will now have multiValued fields represented as a Collection,
-       // even if only a single value is returned for this document.
--      if (val instanceof List) {
-+      // For SolrDocumentList, use writeVal instead of writeArray
-+      if (!(val instanceof SolrDocumentList) && val instanceof List) {
-         // shortcut this common case instead of going through writeVal again
-         writeArray(name,((Iterable)val).iterator());
-       } else {
-diff --git a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
-index 513df4e..5f6e2f2 100644
---- a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
-+++ b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
-@@ -25,10 +25,11 @@ import java.util.Map;
- import java.util.Set;
- 
- import org.apache.solr.common.IteratorWriter;
-+import org.apache.solr.common.MapWriter;
- import org.apache.solr.common.MapWriter.EntryWriter;
- import org.apache.solr.common.PushWriter;
- import org.apache.solr.common.SolrDocument;
--import org.apache.solr.common.MapWriter;
-+import org.apache.solr.common.SolrDocumentList;
- import org.apache.solr.common.params.SolrParams;
- import org.apache.solr.common.util.NamedList;
- import org.apache.solr.common.util.SimpleOrderedMap;
-@@ -367,7 +368,8 @@ class JSONWriter extends TextResponseWriter {
- 
-       // SolrDocument will now have multiValued fields represented as a Collection,
-       // even if only a single value is returned for this document.
--      if (val instanceof List) {
-+      // For SolrDocumentList, use writeVal instead of writeArray
-+      if (!(val instanceof SolrDocumentList) && val instanceof List) {
-         // shortcut this common case instead of going through writeVal again
-         writeArray(name,((Iterable)val).iterator());
-       } else {
-diff --git a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
-index 1b53150..68cebd2 100644
---- a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
-+++ b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
-@@ -22,7 +22,10 @@ import java.lang.reflect.Method;
- import java.lang.reflect.Modifier;
- import java.nio.charset.StandardCharsets;
- import java.util.ArrayList;
-+import java.util.Arrays;
- import java.util.List;
-+
-+import org.apache.solr.JSONTestUtil;
- import org.apache.solr.SolrTestCaseJ4;
- import org.apache.solr.common.SolrDocument;
- import org.apache.solr.common.SolrDocumentList;
-@@ -130,9 +133,9 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
-   }
- 
-   @Test
--  public void testJSONSolrDocument() throws IOException {
-+  public void testJSONSolrDocument() throws Exception {
-     SolrQueryRequest req = req(CommonParams.WT,"json",
--                               CommonParams.FL,"id,score");
-+                               CommonParams.FL,"id,score,_children_,path");
-     SolrQueryResponse rsp = new SolrQueryResponse();
-     JSONResponseWriter w = new JSONResponseWriter();
- 
-@@ -141,11 +144,22 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
- 
-     StringWriter buf = new StringWriter();
- 
-+    SolrDocument childDoc = new SolrDocument();
-+    childDoc.addField("id", "2");
-+    childDoc.addField("score", "0.4");
-+    childDoc.addField("path", Arrays.asList("a>b", "a>b>c"));
-+
-+    SolrDocumentList childList = new SolrDocumentList();
-+    childList.setNumFound(1);
-+    childList.setStart(0);
-+    childList.add(childDoc);
-+
-     SolrDocument solrDoc = new SolrDocument();
-     solrDoc.addField("id", "1");
-     solrDoc.addField("subject", "hello2");
-     solrDoc.addField("title", "hello3");
-     solrDoc.addField("score", "0.7");
-+    solrDoc.setField("_children_", childList);
- 
-     SolrDocumentList list = new SolrDocumentList();
-     list.setNumFound(1);
-@@ -163,8 +177,12 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
-                 result.contains("\"title\""));
-     assertTrue("response doesn't contain expected fields: " + result, 
-                result.contains("\"id\"") &&
--               result.contains("\"score\""));
-+               result.contains("\"score\"") && result.contains("_children_"));
- 
-+    String expectedResult = "{'response':{'numFound':1,'start':0,'maxScore':0.7,'docs':[{'id':'1', 'score':'0.7'," +
-+        " '_children_':{'numFound':1,'start':0,'docs':[{'id':'2', 'score':'0.4', 'path':['a>b', 'a>b>c']}] }}] }}";
-+    String error = JSONTestUtil.match(result, "=="+expectedResult);
-+    assertNull("response validation failed with error: " + error, error);
- 
-     req.close();
-   }
-diff --git a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
-index 620cac0..f6d0a38 100644
---- a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
-+++ b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
-@@ -16,7 +16,11 @@
-  */
- package org.apache.solr.response.transform;
- 
-+import java.io.ByteArrayOutputStream;
- import java.io.IOException;
-+import java.io.InputStream;
-+import java.net.URL;
-+import java.nio.charset.Charset;
- import java.nio.file.Path;
- import java.nio.file.Paths;
- import java.util.ArrayList;
-@@ -26,6 +30,8 @@ import java.util.List;
- import java.util.Map;
- import java.util.Random;
- 
-+import org.apache.commons.io.IOUtils;
-+import org.apache.solr.JSONTestUtil;
- import org.apache.solr.client.solrj.SolrServerException;
- import org.apache.solr.client.solrj.impl.CloudSolrClient;
- import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-@@ -37,10 +43,12 @@ import org.apache.solr.cloud.SolrCloudTestCase;
- import org.apache.solr.common.SolrDocument;
- import org.apache.solr.common.SolrDocumentList;
- import org.apache.solr.common.cloud.ZkStateReader;
-+import org.apache.solr.common.params.ModifiableSolrParams;
- import org.apache.solr.common.util.ContentStreamBase;
- import org.junit.BeforeClass;
- import org.junit.Test;
- 
-+@org.apache.solr.SolrTestCaseJ4.SuppressSSL()
- public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
-   
-   private static final String support = "These guys help customers";
-@@ -92,7 +100,7 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
-   
-   @SuppressWarnings("serial")
-   @Test
--  public void test() throws SolrServerException, IOException {
-+  public void test() throws Exception {
-     int peopleMultiplier = atLeast(1);
-     int deptMultiplier = atLeast(1);
-     
-@@ -100,24 +108,26 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
-     
-     Random random1 = random();
-     
-+    final ModifiableSolrParams params = params(
-+        new String[]{"q","name_s:dave", "indent","true",
-+            "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]",
-+            "rows","" + peopleMultiplier,
-+            "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}",
-+            "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
-+            "depts.indent","true",
-+            "depts.collection","departments",
-+            differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
-+            "depts.rows",""+(deptMultiplier*2),
-+            "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
-+            random().nextBoolean()?"depts.wt":"whatever",anyWt(),
-+            random().nextBoolean()?"wt":"whatever",anyWt()});
-+
-+    final SolrDocumentList hits;
-     {
--     
--      final QueryRequest  qr = new QueryRequest(params(
--          new String[]{"q","name_s:dave", "indent","true",
--          "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]", 
--          "rows","" + peopleMultiplier,
--          "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}", 
--          "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
--          "depts.indent","true",
--          "depts.collection","departments",
--          differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
--          "depts.rows",""+(deptMultiplier*2),
--          "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
--          random().nextBoolean()?"depts.wt":"whatever",anyWt(),
--          random().nextBoolean()?"wt":"whatever",anyWt()}));
-+      final QueryRequest qr = new QueryRequest(params);
-       final QueryResponse  rsp = new QueryResponse();
--      rsp.setResponse(cluster.getSolrClient().request(qr, people));
--      final SolrDocumentList hits = rsp.getResults();
-+      rsp.setResponse(cluster.getSolrClient().request(qr, people+","+depts));
-+      hits = rsp.getResults();
-       
-       assertEquals(peopleMultiplier, hits.getNumFound());
-       
-@@ -140,6 +150,21 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
-       }
-       assertEquals(hits.toString(), engineerCount, supportCount); 
-     }
-+
-+    params.set("wt", "json");
-+    final URL node = new URL(cluster.getRandomJetty(random()).getBaseUrl().toString()
-+     +"/"+people+"/select"+params.toQueryString());
-+
-+    try(final InputStream jsonResponse = node.openStream()){
-+      final ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
-+      IOUtils.copy(jsonResponse, outBuffer);
-+
-+      final Object expected = ((SolrDocumentList) hits.get(0).getFieldValue("depts")).get(0).get("text_t");
-+      final String err = JSONTestUtil.match("/response/docs/[0]/depts/docs/[0]/text_t"
-+          ,outBuffer.toString(Charset.forName("UTF-8").toString()),
-+          "\""+expected+"\"");
-+      assertNull(err,err);
-+    }
-     
-   }
- 
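
For reference, the core of the removed patch (whose changes already landed via the master merge earlier in this batch) is a single dispatch guard: SolrDocumentList extends List, so the generic array shortcut has to exclude it. A minimal sketch using the names from the patch (a hypothetical wrapper; in Solr this logic sits inside the JSONWriter document-writing code):

  // SolrDocumentList is a List, but it must be rendered as a
  // {numFound, start, docs} map, so it must not take the plain-array path.
  void writeDocField(String name, Object val) throws IOException {
    if (!(val instanceof SolrDocumentList) && val instanceof List) {
      writeArray(name, ((Iterable) val).iterator()); // ordinary multivalued field
    } else {
      writeVal(name, val);                           // SolrDocumentList and everything else
    }
  }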


[35/50] lucene-solr:jira/solr-12181: LUCENE-8238: improve javadocs for WordDelimiterFilter and WordDelimiterGraphFilter

Posted by ab...@apache.org.
LUCENE-8238: improve javadocs for WordDelimiterFilter and WordDelimiterGraphFilter


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0f53adbe
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0f53adbe
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0f53adbe

Branch: refs/heads/jira/solr-12181
Commit: 0f53adbee49015aa01e8f66945f82e88a9172c7c
Parents: 5c37b07
Author: Mike McCandless <mi...@apache.org>
Authored: Fri Apr 6 15:20:22 2018 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Fri Apr 6 15:20:22 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                                |  5 +++++
 .../miscellaneous/WordDelimiterFilter.java        | 18 ++++++++++++------
 .../miscellaneous/WordDelimiterGraphFilter.java   | 17 +++++++++++------
 3 files changed, 28 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0f53adbe/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 84e242d..f90f9e3 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -153,6 +153,11 @@ Build
 
 * LUCENE-8230: Upgrade forbiddenapis to version 2.5.  (Uwe Schindler)
 
+Documentation
+
+* LUCENE-8238: Improve WordDelimiterFilter and WordDelimiterGraphFilter javadocs
+  (Mike Sokolov via Mike McCandless)
+
 ======================= Lucene 7.3.0 =======================
 
 API Changes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0f53adbe/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
index aef697c..313386b 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
@@ -55,11 +55,14 @@ import org.apache.lucene.util.InPlaceMergeSorter;
  * </li>
  * </ul>
  * 
- * The <b>combinations</b> parameter affects how subwords are combined:
+ * The <b>GENERATE...</b> options affect how incoming tokens are broken into parts, and the
+ * various <b>CATENATE_...</b> parameters affect how those parts are combined.
+ *
  * <ul>
- * <li>combinations="0" causes no subword combinations: <code>"PowerShot"</code>
- * &#8594; <code>0:"Power", 1:"Shot"</code> (0 and 1 are the token positions)</li>
- * <li>combinations="1" means that in addition to the subwords, maximum runs of
+ * <li>If no CATENATE option is set, then no subword combinations are generated:
+ * <code>"PowerShot"</code> &#8594; <code>0:"Power", 1:"Shot"</code> (0 and 1 are the token
+ * positions)</li>
+ * <li>CATENATE_WORDS means that in addition to the subwords, maximum runs of
  * non-numeric subwords are catenated and produced at the same position of the
  * last subword in the run:
  * <ul>
@@ -72,12 +75,15 @@ import org.apache.lucene.util.InPlaceMergeSorter;
  * </li>
  * </ul>
  * </li>
+ * <li>CATENATE_NUMBERS works like CATENATE_WORDS, but for adjacent digit sequences.</li>
+ * <li>CATENATE_ALL smushes together all the token parts without distinguishing numbers and words.</li>
  * </ul>
+ *
  * One use for {@link WordDelimiterFilter} is to help match words with different
  * subword delimiters. For example, if the source text contained "wi-fi" one may
  * want "wifi" "WiFi" "wi-fi" "wi+fi" queries to all match. One way of doing so
- * is to specify combinations="1" in the analyzer used for indexing, and
- * combinations="0" (the default) in the analyzer used for querying. Given that
+ * is to specify CATENATE options in the analyzer used for indexing, and
+ * not in the analyzer used for querying. Given that
  * the current {@link StandardTokenizer} immediately removes many intra-word
  * delimiters, it is recommended that this filter be used after a tokenizer that
  * does not do this (such as {@link WhitespaceTokenizer}).

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0f53adbe/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
index a6ade19..7949fa2 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
@@ -62,11 +62,14 @@ import org.apache.lucene.util.RamUsageEstimator;
  * </li>
  * </ul>
  * 
- * The <b>combinations</b> parameter affects how subwords are combined:
+ * The <b>GENERATE...</b> options affect how incoming tokens are broken into parts, and the
+ * various <b>CATENATE_...</b> parameters affect how those parts are combined.
+ *
  * <ul>
- * <li>combinations="0" causes no subword combinations: <code>"PowerShot"</code>
- * &#8594; <code>0:"Power", 1:"Shot"</code> (0 and 1 are the token positions)</li>
- * <li>combinations="1" means that in addition to the subwords, maximum runs of
+ * <li>If no CATENATE option is set, then no subword combinations are generated:
+ * <code>"PowerShot"</code> &#8594; <code>0:"Power", 1:"Shot"</code> (0 and 1 are the token
+ * positions)</li>
+ * <li>CATENATE_WORDS means that in addition to the subwords, maximum runs of
  * non-numeric subwords are catenated and produced at the same position of the
  * last subword in the run:
  * <ul>
@@ -79,12 +82,14 @@ import org.apache.lucene.util.RamUsageEstimator;
  * </li>
  * </ul>
  * </li>
+ * <li>CATENATE_NUMBERS works like CATENATE_WORDS, but for adjacent digit sequences.</li>
+ * <li>CATENATE_ALL smushes together all the token parts without distinguishing numbers and words.</li>
  * </ul>
  * One use for {@link WordDelimiterGraphFilter} is to help match words with different
  * subword delimiters. For example, if the source text contained "wi-fi" one may
  * want "wifi" "WiFi" "wi-fi" "wi+fi" queries to all match. One way of doing so
- * is to specify combinations="1" in the analyzer used for indexing, and
- * combinations="0" (the default) in the analyzer used for querying. Given that
+ * is to specify CATENATE options in the analyzer used for indexing, and not
+ * in the analyzer used for querying. Given that
  * the current {@link StandardTokenizer} immediately removes many intra-word
  * delimiters, it is recommended that this filter be used after a tokenizer that
  * does not do this (such as {@link WhitespaceTokenizer}).

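The index-time/query-time asymmetry suggested above could be wired up like this (a sketch under the same assumptions; the tokenizer choice is illustrative, and the graph variant is used since it produces correct token graphs):

  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.Tokenizer;
  import org.apache.lucene.analysis.core.WhitespaceTokenizer;
  import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;

  // index-time: subwords plus catenated runs, so "wi-fi", "wifi" and "wi fi"
  // all produce matching terms in the index
  Analyzer indexAnalyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer source = new WhitespaceTokenizer();
      TokenStream sink = new WordDelimiterGraphFilter(source,
          WordDelimiterGraphFilter.GENERATE_WORD_PARTS
              | WordDelimiterGraphFilter.CATENATE_WORDS, null);
      return new TokenStreamComponents(source, sink);
    }
  };

  // query-time: subwords only, no CATENATE options
  Analyzer queryAnalyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer source = new WhitespaceTokenizer();
      TokenStream sink = new WordDelimiterGraphFilter(source,
          WordDelimiterGraphFilter.GENERATE_WORD_PARTS, null);
      return new TokenStreamComponents(source, sink);
    }
  };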

[31/50] lucene-solr:jira/solr-12181: Use asciidoctor's 'attribute-missing: warn' option and fix some places that were generating warnings because of unescaped/unintended attribute syntax

Posted by ab...@apache.org.
Use asciidoctor's 'attribute-missing: warn' option and fix some places that were generating warnings because of unescaped/unintended attribute syntax


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/73d74107
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/73d74107
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/73d74107

Branch: refs/heads/jira/solr-12181
Commit: 73d74107dcb2d836c541654e4bf99dc2e306cf75
Parents: aba793d
Author: Chris Hostetter <ho...@apache.org>
Authored: Thu Apr 5 17:16:00 2018 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Thu Apr 5 17:16:00 2018 -0700

----------------------------------------------------------------------
 solr/solr-ref-guide/build.xml                                      | 1 +
 solr/solr-ref-guide/src/_config.yml.template                       | 1 +
 solr/solr-ref-guide/src/learning-to-rank.adoc                      | 2 +-
 solr/solr-ref-guide/src/meta-docs/pdf.adoc                         | 2 +-
 solr/solr-ref-guide/src/rule-based-replica-placement.adoc          | 2 +-
 ...ng-structured-data-store-data-with-the-data-import-handler.adoc | 2 +-
 6 files changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/73d74107/solr/solr-ref-guide/build.xml
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/build.xml b/solr/solr-ref-guide/build.xml
index 92c236a..08fbc40 100644
--- a/solr/solr-ref-guide/build.xml
+++ b/solr/solr-ref-guide/build.xml
@@ -213,6 +213,7 @@
                    imagesDir="${build.content.dir}"
                    doctype="book"
                    safemode="unsafe">
+        <attribute key="attribute-missing" value="warn" />
         <attribute key="section-toc" value='' /><!-- we don't use these in the pdf -->
         <attribute key="icons" value="font" />
         <attribute key="icon-set" value="fa" />

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/73d74107/solr/solr-ref-guide/src/_config.yml.template
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/_config.yml.template b/solr/solr-ref-guide/src/_config.yml.template
index 1c55dc4..f50ae1e 100755
--- a/solr/solr-ref-guide/src/_config.yml.template
+++ b/solr/solr-ref-guide/src/_config.yml.template
@@ -91,6 +91,7 @@ asciidoctor:
   safe: 0
   attributes:
     <<: *solr-attributes-ref
+    attribute-missing: "warn"
     icons: "font"
     source-highlighter: "pygments"
     pygments-css: "style"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/73d74107/solr/solr-ref-guide/src/learning-to-rank.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/learning-to-rank.adoc b/solr/solr-ref-guide/src/learning-to-rank.adoc
index f98f049..4e79a7a 100644
--- a/solr/solr-ref-guide/src/learning-to-rank.adoc
+++ b/solr/solr-ref-guide/src/learning-to-rank.adoc
@@ -61,7 +61,7 @@ The LTR contrib module includes several feature classes as well as support for c
 |solr query |{solr-javadocs}/solr-ltr/org/apache/solr/ltr/feature/SolrFeature.html[SolrFeature] |`{"q":"{!func}` `recip(ms(NOW,last_modified)` `,3.16e-11,1,1)"}` |supported
 |solr filter query |{solr-javadocs}/solr-ltr/org/apache/solr/ltr/feature/SolrFeature.html[SolrFeature] |`{"fq":["{!terms f=category}book"]}` |supported
 |solr query + filter query |{solr-javadocs}/solr-ltr/org/apache/solr/ltr/feature/SolrFeature.html[SolrFeature] |`{"q":"{!func}` `recip(ms(NOW,last_modified),` `3.16e-11,1,1)",` `"fq":["{!terms f=category}book"]}` |supported
-|value |{solr-javadocs}/solr-ltr/org/apache/solr/ltr/feature/ValueFeature.html[ValueFeature] |`{"value":"${userFromMobile}","required":true}` |supported
+|value |{solr-javadocs}/solr-ltr/org/apache/solr/ltr/feature/ValueFeature.html[ValueFeature] |`{"value":"$\{userFromMobile}","required":true}` |supported
 |(custom) |(custom class extending {solr-javadocs}/solr-ltr/org/apache/solr/ltr/feature/Feature.html[Feature]) | |
 |===
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/73d74107/solr/solr-ref-guide/src/meta-docs/pdf.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/meta-docs/pdf.adoc b/solr/solr-ref-guide/src/meta-docs/pdf.adoc
index 9c5cc7f..b7d4c62 100644
--- a/solr/solr-ref-guide/src/meta-docs/pdf.adoc
+++ b/solr/solr-ref-guide/src/meta-docs/pdf.adoc
@@ -141,5 +141,5 @@ These attributes use variables that are inserted by Ant during the PDF creation
 `<attribute key="solr-docs-version" value="${solr-docs-version}" />`:: The version of Solr covered by this guide.
 `<attribute key="solr-javadocs" value="${solr-javadocs}" />`:: Sets the path for Solr javadoc links to include the right path for the current release version.
 `<attribute key="lucene-javadocs" value="${lucene-javadocs}" />`:: Sets the path for Lucene javadoc links to the right path for the current release version.
-`<attribute key="build-date" value="${DSTAMP}" />`:: Sets the date of the build to add the date to the footer of each page of the PDF.
+`<attribute key="build-date" value="$\{DSTAMP}" />`:: Sets the date of the build to add the date to the footer of each page of the PDF.
 `<attribute key="build-year" value="${current.year}" />`:: Sets the year of the build to add the date to the copyright notice.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/73d74107/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/rule-based-replica-placement.adoc b/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
index 7c0953b..66b1d45 100644
--- a/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
+++ b/solr/solr-ref-guide/src/rule-based-replica-placement.adoc
@@ -84,7 +84,7 @@ Tag values come from a plugin called Snitch. If there is a tag named ‘rack’
 * *node*: node name
 * *role*: The role of the node. The only supported role is 'overseer'
 * *ip_1, ip_2, ip_3, ip_4*: These are ip fragments for each node. For example, in a host with ip `192.168.1.2`, `ip_1 = 2`, `ip_2 =1`, `ip_3 = 168` and` ip_4 = 192`
-* *sysprop.{PROPERTY_NAME}*: These are values available from system properties. `sysprop.key` means a value that is passed to the node as `-Dkey=keyValue` during the node startup. It is possible to use rules like `sysprop.key:expectedVal,shard:*`
+* *sysprop.\{PROPERTY_NAME}*: These are values available from system properties. `sysprop.key` means a value that is passed to the node as `-Dkey=keyValue` during the node startup. It is possible to use rules like `sysprop.key:expectedVal,shard:*`
 
 === How Snitches are Configured
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/73d74107/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc b/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
index fdcfe5a..7cf50ee 100644
--- a/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
+++ b/solr/solr-ref-guide/src/uploading-structured-data-store-data-with-the-data-import-handler.adoc
@@ -306,7 +306,7 @@ For MySQL driver, which doesn't honor fetchSize and pulls whole resultSet, which
 +
 In this case, set `batchSize=-1` that pass setFetchSize(Integer.MIN_VALUE), and switch result set to pull row by row
 
-All of them substitute properties via `${placeholders}`.
+All of them substitute properties via `$\{placeholders}`.
 
 === URLDataSource
 


[07/50] lucene-solr:jira/solr-12181: Add 7.3.0 release to DOAP files

Posted by ab...@apache.org.
Add 7.3.0 release to DOAP files


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3e17933a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3e17933a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3e17933a

Branch: refs/heads/jira/solr-12181
Commit: 3e17933ad853dc7340b9f6cfa3ff8d6b1052c14e
Parents: 8e276b9
Author: Alan Woodward <ro...@apache.org>
Authored: Wed Apr 4 17:59:48 2018 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Wed Apr 4 17:59:48 2018 +0100

----------------------------------------------------------------------
 dev-tools/doap/lucene.rdf | 7 +++++++
 dev-tools/doap/solr.rdf   | 7 +++++++
 2 files changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3e17933a/dev-tools/doap/lucene.rdf
----------------------------------------------------------------------
diff --git a/dev-tools/doap/lucene.rdf b/dev-tools/doap/lucene.rdf
index c33666d..d5f8893 100644
--- a/dev-tools/doap/lucene.rdf
+++ b/dev-tools/doap/lucene.rdf
@@ -69,6 +69,13 @@
     <!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
     <release>
       <Version>
+        <name>lucene-7.3.0</name>
+        <created>2018-04-04</created>
+        <revision>7.3.0</revision>
+      </Version>
+    </release>
+    <release>
+      <Version>
         <name>lucene-7.2.1</name>
         <created>2018-01-15</created>
         <revision>7.2.1</revision>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3e17933a/dev-tools/doap/solr.rdf
----------------------------------------------------------------------
diff --git a/dev-tools/doap/solr.rdf b/dev-tools/doap/solr.rdf
index 09bdb6b..fb23089 100644
--- a/dev-tools/doap/solr.rdf
+++ b/dev-tools/doap/solr.rdf
@@ -69,6 +69,13 @@
     <!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
     <release>
       <Version>
+        <name>solr-7.3.0</name>
+        <created>2018-04-04</created>
+        <revision>7.3.0</revision>
+      </Version>
+    </release>
+    <release>
+      <Version>
         <name>solr-7.2.1</name>
         <created>2018-01-15</created>
         <revision>7.2.1</revision>


[50/50] lucene-solr:jira/solr-12181: SOLR-12181: Support mixed bytes / docs bounds.

Posted by ab...@apache.org.
SOLR-12181: Support mixed bytes / docs bounds.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/751987d5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/751987d5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/751987d5

Branch: refs/heads/jira/solr-12181
Commit: 751987d53df34884c71d8ba8e1a76f9e48290f1c
Parents: 221f749
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Tue Apr 10 16:11:07 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Tue Apr 10 16:11:07 2018 +0200

----------------------------------------------------------------------
 .../cloud/autoscaling/IndexSizeTrigger.java     | 121 ++++++++++++-------
 .../cloud/autoscaling/SearchRateTrigger.java    |   5 +-
 .../org/apache/solr/cloud/CloudTestUtils.java   |  10 +-
 .../cloud/autoscaling/IndexSizeTriggerTest.java |  19 +--
 .../ScheduledMaintenanceTriggerTest.java        |   2 +-
 .../sim/SimClusterStateProvider.java            |  37 ++++--
 .../autoscaling/sim/SimSolrCloudTestCase.java   |   6 -
 .../cloud/autoscaling/sim/TestLargeCluster.java |  14 ++-
 .../cloud/autoscaling/SplitShardSuggester.java  |   2 +-
 9 files changed, 130 insertions(+), 86 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
index 7bfda9a..725da62 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
@@ -20,12 +20,10 @@ package org.apache.solr.cloud.autoscaling;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -55,64 +53,85 @@ import org.slf4j.LoggerFactory;
 public class IndexSizeTrigger extends TriggerBase {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static final String ABOVE_PROP = "above";
+  public static final String ABOVE_BYTES_PROP = "aboveBytes";
+  public static final String ABOVE_DOCS_PROP = "aboveDocs";
   public static final String ABOVE_OP_PROP = "aboveOp";
-  public static final String BELOW_PROP = "below";
+  public static final String BELOW_BYTES_PROP = "belowBytes";
+  public static final String BELOW_DOCS_PROP = "belowDocs";
   public static final String BELOW_OP_PROP = "belowOp";
-  public static final String UNIT_PROP = "unit";
   public static final String COLLECTIONS_PROP = "collections";
 
-  public static final String SIZE_PROP = "__indexSize__";
+  public static final String BYTES_SIZE_PROP = "__bytesSize__";
+  public static final String DOCS_SIZE_PROP = "__docsSize__";
   public static final String ABOVE_SIZE_PROP = "aboveSize";
   public static final String BELOW_SIZE_PROP = "belowSize";
 
   public enum Unit { bytes, docs }
 
-  private long above, below;
+  private long aboveBytes, aboveDocs, belowBytes, belowDocs;
   private CollectionParams.CollectionAction aboveOp, belowOp;
-  private Unit unit;
   private final Set<String> collections = new HashSet<>();
   private final Map<String, Long> lastEventMap = new ConcurrentHashMap<>();
 
   public IndexSizeTrigger(String name) {
     super(TriggerEventType.INDEXSIZE, name);
     TriggerUtils.validProperties(validProperties,
-        ABOVE_PROP, BELOW_PROP, UNIT_PROP, COLLECTIONS_PROP);
+        ABOVE_BYTES_PROP, ABOVE_DOCS_PROP, BELOW_BYTES_PROP, BELOW_DOCS_PROP, COLLECTIONS_PROP);
   }
 
   @Override
   public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
     super.configure(loader, cloudManager, properties);
-    String unitStr = String.valueOf(properties.getOrDefault(UNIT_PROP, Unit.bytes));
+    String aboveStr = String.valueOf(properties.getOrDefault(ABOVE_BYTES_PROP, Long.MAX_VALUE));
+    String belowStr = String.valueOf(properties.getOrDefault(BELOW_BYTES_PROP, -1));
     try {
-      unit = Unit.valueOf(unitStr.toLowerCase(Locale.ROOT));
+      aboveBytes = Long.parseLong(aboveStr);
+      if (aboveBytes <= 0) {
+        throw new Exception("value must be > 0");
+      }
     } catch (Exception e) {
-      throw new TriggerValidationException(getName(), UNIT_PROP, "invalid unit, must be one of " + Arrays.toString(Unit.values()));
+      throw new TriggerValidationException(getName(), ABOVE_BYTES_PROP, "invalid value '" + aboveStr + "': " + e.toString());
+    }
+    try {
+      belowBytes = Long.parseLong(belowStr);
+      if (belowBytes < 0) {
+        belowBytes = -1;
+      }
+    } catch (Exception e) {
+      throw new TriggerValidationException(getName(), BELOW_BYTES_PROP, "invalid value '" + belowStr + "': " + e.toString());
+    }
+    // below must be at least 2x smaller than above, otherwise splitting a shard
+    // would immediately put the shard below the threshold and cause the mergeshards action
+    if (belowBytes > 0 && (belowBytes * 2 > aboveBytes)) {
+      throw new TriggerValidationException(getName(), BELOW_BYTES_PROP,
+          "invalid value " + belowBytes + ", should be less than half of '" + ABOVE_BYTES_PROP + "' value, which is " + aboveBytes);
     }
-    String aboveStr = String.valueOf(properties.getOrDefault(ABOVE_PROP, Long.MAX_VALUE));
-    String belowStr = String.valueOf(properties.getOrDefault(BELOW_PROP, -1));
+    // do the same for docs bounds
+    aboveStr = String.valueOf(properties.getOrDefault(ABOVE_DOCS_PROP, Long.MAX_VALUE));
+    belowStr = String.valueOf(properties.getOrDefault(BELOW_DOCS_PROP, -1));
     try {
-      above = Long.parseLong(aboveStr);
-      if (above <= 0) {
+      aboveDocs = Long.parseLong(aboveStr);
+      if (aboveDocs <= 0) {
         throw new Exception("value must be > 0");
       }
     } catch (Exception e) {
-      throw new TriggerValidationException(getName(), ABOVE_PROP, "invalid value '" + aboveStr + "': " + e.toString());
+      throw new TriggerValidationException(getName(), ABOVE_DOCS_PROP, "invalid value '" + aboveStr + "': " + e.toString());
     }
     try {
-      below = Long.parseLong(belowStr);
-      if (below < 0) {
-        below = -1;
+      belowDocs = Long.parseLong(belowStr);
+      if (belowDocs < 0) {
+        belowDocs = -1;
       }
     } catch (Exception e) {
-      throw new TriggerValidationException(getName(), BELOW_PROP, "invalid value '" + belowStr + "': " + e.toString());
+      throw new TriggerValidationException(getName(), BELOW_DOCS_PROP, "invalid value '" + belowStr + "': " + e.toString());
     }
     // below must be at least 2x smaller than above, otherwise splitting a shard
     // would immediately put the shard below the threshold and cause the mergeshards action
-    if (below > 0 && (below * 2 > above)) {
-      throw new TriggerValidationException(getName(), BELOW_PROP,
-          "invalid value " + below + ", should be less than half of '" + ABOVE_PROP + "' value, which is " + above);
+    if (belowDocs > 0 && (belowDocs * 2 > aboveDocs)) {
+      throw new TriggerValidationException(getName(), BELOW_DOCS_PROP,
+          "invalid value " + belowDocs + ", should be less than half of '" + ABOVE_DOCS_PROP + "' value, which is " + aboveDocs);
     }
+
     String collectionsString = (String) properties.get(COLLECTIONS_PROP);
     if (collectionsString != null && !collectionsString.isEmpty()) {
       collections.addAll(StrUtils.splitSmart(collectionsString, ','));
@@ -209,17 +228,9 @@ public class IndexSizeTrigger extends TriggerBase {
               replicaName = info.getName(); // which is actually coreNode name...
             }
             String registry = SolrCoreMetricManager.createRegistryName(true, coll, sh, replicaName, null);
-            String tag;
-            switch (unit) {
-              case bytes:
-                tag = "metrics:" + registry + ":INDEX.size";
-                break;
-              case docs:
-                tag = "metrics:" + registry + ":SEARCHER.searcher.numDocs";
-                break;
-              default:
-                throw new UnsupportedOperationException("Unit " + unit + " not supported");
-            }
+            String tag = "metrics:" + registry + ":INDEX.sizeInBytes";
+            metricTags.put(tag, info);
+            tag = "metrics:" + registry + ":SEARCHER.searcher.numDocs";
             metricTags.put(tag, info);
           });
         });
@@ -228,7 +239,7 @@ public class IndexSizeTrigger extends TriggerBase {
         }
         Map<String, Object> sizes = cloudManager.getNodeStateProvider().getNodeValues(node, metricTags.keySet());
         sizes.forEach((tag, size) -> {
-          ReplicaInfo info = metricTags.get(tag);
+          final ReplicaInfo info = metricTags.get(tag);
           if (info == null) {
             log.warn("Missing replica info for response tag " + tag);
           } else {
@@ -237,9 +248,13 @@ public class IndexSizeTrigger extends TriggerBase {
               log.warn("invalid size value - not a number: '" + size + "' is " + size.getClass().getName());
               return;
             }
-            info = (ReplicaInfo)info.clone();
-            info.getVariables().put(SIZE_PROP, ((Number) size).longValue());
-            currentSizes.put(info.getCore(), info);
+
+            ReplicaInfo currentInfo = currentSizes.computeIfAbsent(info.getCore(), k -> (ReplicaInfo)info.clone());
+            if (tag.contains("INDEX")) {
+              currentInfo.getVariables().put(BYTES_SIZE_PROP, ((Number) size).longValue());
+            } else {
+              currentInfo.getVariables().put(DOCS_SIZE_PROP, ((Number) size).longValue());
+            }
           }
         });
       }
@@ -255,22 +270,30 @@ public class IndexSizeTrigger extends TriggerBase {
     // collection / list(info)
     Map<String, List<ReplicaInfo>> aboveSize = new HashMap<>();
     currentSizes.entrySet().stream()
-        .filter(e -> (Long)e.getValue().getVariable(SIZE_PROP) > above &&
-            waitForElapsed(e.getKey(), now, lastEventMap))
+        .filter(e -> (
+            (Long)e.getValue().getVariable(BYTES_SIZE_PROP) > aboveBytes ||
+            (Long)e.getValue().getVariable(DOCS_SIZE_PROP) > aboveDocs
+            ) && waitForElapsed(e.getKey(), now, lastEventMap))
         .forEach(e -> {
           ReplicaInfo info = e.getValue();
           List<ReplicaInfo> infos = aboveSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
-          infos.add(info);
+          if (!infos.contains(info)) {
+            infos.add(info);
+          }
         });
     // collection / list(info)
     Map<String, List<ReplicaInfo>> belowSize = new HashMap<>();
     currentSizes.entrySet().stream()
-        .filter(e -> (Long)e.getValue().getVariable(SIZE_PROP) < below &&
-            waitForElapsed(e.getKey(), now, lastEventMap))
+        .filter(e -> (
+            (Long)e.getValue().getVariable(BYTES_SIZE_PROP) < belowBytes ||
+            (Long)e.getValue().getVariable(DOCS_SIZE_PROP) < belowDocs
+            ) && waitForElapsed(e.getKey(), now, lastEventMap))
         .forEach(e -> {
           ReplicaInfo info = e.getValue();
           List<ReplicaInfo> infos = belowSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
-          infos.add(info);
+          if (!infos.contains(info)) {
+            infos.add(info);
+          }
         });
 
     if (aboveSize.isEmpty() && belowSize.isEmpty()) {
@@ -299,7 +322,11 @@ public class IndexSizeTrigger extends TriggerBase {
       }
       // sort by increasing size
       replicas.sort((r1, r2) -> {
-        long delta = (Long) r1.getVariable(SIZE_PROP) - (Long) r2.getVariable(SIZE_PROP);
+        // XXX this is not quite correct - if BYTES_SIZE_PROP decided that replica got here
+        // then we should be sorting by BYTES_SIZE_PROP. However, since DOCS and BYTES are
+        // loosely correlated it's simpler to sort just by docs (which better reflects the "too small"
+        // condition than index size, due to possibly existing deleted docs that still occupy space)
+        long delta = (Long) r1.getVariable(DOCS_SIZE_PROP) - (Long) r2.getVariable(DOCS_SIZE_PROP);
         if (delta > 0) {
           return 1;
         } else if (delta < 0) {
@@ -308,7 +335,7 @@ public class IndexSizeTrigger extends TriggerBase {
           return 0;
         }
       });
-      // take top two smallest
+      // take the top two smallest
       TriggerEvent.Op op = new TriggerEvent.Op(belowOp);
       op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(0).getShard()));
       op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(1).getShard()));

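With this patch the bytes and docs bounds can appear together in a single trigger config; a hedged sketch in the style of the tests below (all names and values illustrative — bounds that are left out fall back to their defaults, Long.MAX_VALUE on the 'above' side and -1/disabled on the 'below' side):

  String setTriggerCommand = "{" +
      "'set-trigger' : {" +
      "'name' : 'index_size_trigger'," +
      "'event' : 'indexSize'," +
      "'waitFor' : '10s'," +
      "'aboveBytes' : 5000000000," +   // split shards larger than ~5 GB
      "'belowDocs' : 1000," +          // merge shards with fewer than 1000 docs
      "'enabled' : true," +
      "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
      "{'name' : 'execute_plan', 'class' : 'solr.ExecutePlanAction'}]" +
      "}}";
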
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java
index 00bc6d8..02a2d0c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/SearchRateTrigger.java
@@ -181,10 +181,11 @@ public class SearchRateTrigger extends TriggerBase {
         } else {
           Map<String, List<ReplicaInfo>> perCollection = collectionRates.computeIfAbsent(info.getCollection(), s -> new HashMap<>());
           List<ReplicaInfo> perShard = perCollection.computeIfAbsent(info.getShard(), s -> new ArrayList<>());
-          info.getVariables().put(AutoScalingParams.RATE, rate);
+          info = (ReplicaInfo)info.clone();
+          info.getVariables().put(AutoScalingParams.RATE, ((Number)rate).doubleValue());
           perShard.add(info);
           AtomicDouble perNode = nodeRates.computeIfAbsent(node, s -> new AtomicDouble());
-          perNode.addAndGet((Double)rate);
+          perNode.addAndGet(((Number)rate).doubleValue());
         }
       });
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java b/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
index d51e6ca..5590252 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.util.Collection;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -113,16 +114,21 @@ public class CloudTestUtils {
    * number of shards and replicas
    */
   public static CollectionStatePredicate clusterShape(int expectedShards, int expectedReplicas) {
+    return clusterShape(expectedShards, expectedReplicas, false);
+  }
+
+  public static CollectionStatePredicate clusterShape(int expectedShards, int expectedReplicas, boolean withInactive) {
     return (liveNodes, collectionState) -> {
       if (collectionState == null) {
         log.debug("-- null collection");
         return false;
       }
-      if (collectionState.getActiveSlices().size() != expectedShards) {
+      Collection<Slice> slices = withInactive ? collectionState.getSlices() : collectionState.getActiveSlices();
+      if (slices.size() != expectedShards) {
         log.debug("-- wrong number of active slices, expected=" + expectedShards + ", found=" + collectionState.getSlices().size());
         return false;
       }
-      for (Slice slice : collectionState.getActiveSlices()) {
+      for (Slice slice : slices) {
         int activeReplicas = 0;
         for (Replica replica : slice) {
           if (replica.isActive(liveNodes))

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
index 19edfbf..a24b447 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
@@ -19,7 +19,6 @@ package org.apache.solr.cloud.autoscaling;
 
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -41,9 +40,7 @@ import org.apache.solr.cloud.CloudTestUtils;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.cloud.autoscaling.sim.SimCloudManager;
 import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Pair;
@@ -53,7 +50,6 @@ import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.util.LogLevel;
 import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -246,9 +242,8 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
         "'name' : 'index_size_trigger'," +
         "'event' : 'indexSize'," +
         "'waitFor' : '" + waitForSeconds + "s'," +
-        "'unit' : 'docs'," +
-        "'above' : 10," +
-        "'below' : 4," +
+        "'aboveDocs' : 10," +
+        "'belowDocs' : 4," +
         "'enabled' : true," +
         "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
         "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}]" +
@@ -354,9 +349,8 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
         "'name' : 'index_size_trigger'," +
         "'event' : 'indexSize'," +
         "'waitFor' : '" + waitForSeconds + "s'," +
-        "'unit' : 'docs'," +
-        "'above' : 40," +
-        "'below' : 4," +
+        "'aboveDocs' : 40," +
+        "'belowDocs' : 4," +
         "'enabled' : true," +
         "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
         "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}]" +
@@ -438,9 +432,8 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
     props.put("event", "indexSize");
     props.put("waitFor", waitForSeconds);
     props.put("enabled", true);
-    props.put(IndexSizeTrigger.UNIT_PROP, IndexSizeTrigger.Unit.docs.toString());
-    props.put(IndexSizeTrigger.ABOVE_PROP, 10);
-    props.put(IndexSizeTrigger.BELOW_PROP, 2);
+    props.put(IndexSizeTrigger.ABOVE_DOCS_PROP, 10);
+    props.put(IndexSizeTrigger.BELOW_DOCS_PROP, 2);
     List<Map<String, String>> actions = new ArrayList<>(3);
     Map<String, String> map = new HashMap<>(2);
     map.put("name", "compute_plan");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
index ffcab4d..164db8f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
@@ -165,7 +165,7 @@ public class ScheduledMaintenanceTriggerTest extends SolrCloudTestCase {
         .setShardName("shard1");
     split1.process(solrClient);
     CloudTestUtils.waitForState(cloudManager, "failed to split " + collection1, collection1,
-        CloudTestUtils.clusterShape(3, 1));
+        CloudTestUtils.clusterShape(3, 1, true));
 
     String setListenerCommand = "{" +
         "'set-listener' : " +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index b8da5b0..9b3782a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -900,7 +900,12 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     if (sessionWrapper != null) sessionWrapper.release();
 
     // adjust numDocs / deletedDocs / maxDoc
-    String numDocsStr = parentSlice.getLeader().getStr("SEARCHER.searcher.numDocs", "0");
+    Replica leader = parentSlice.getLeader();
+    // XXX leader election may not have happened yet - should we require it?
+    if (leader == null) {
+      leader = parentSlice.getReplicas().iterator().next();
+    }
+    String numDocsStr = leader.getStr("SEARCHER.searcher.numDocs", "0");
     long numDocs = Long.parseLong(numDocsStr);
     long newNumDocs = numDocs / subSlices.size();
     long remainder = numDocs % subSlices.size();
@@ -1004,21 +1009,22 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
-   * Simulate an update by increasing replica metrics.
-   * <p>The following core metrics are updated:
+   * Simulate an update by modifying replica metrics.
+   * The following core metrics are updated:
    * <ul>
-   *   <li></li>
+   *   <li><code>SEARCHER.searcher.numDocs</code> - increased by added docs, decreased by deleteById and deleteByQuery</li>
+   *   <li><code>SEARCHER.searcher.deletedDocs</code> - decreased by deleteById and deleteByQuery by up to <code>numDocs</code></li>
+   *   <li><code>SEARCHER.searcher.maxDoc</code> - always increased by the number of added docs.</li>
    * </ul>
-   * </p>
-   * <p>IMPORTANT limitations:
+   * <p>IMPORTANT limitations:</p>
    * <ul>
    *   <li>document replacements are always counted as new docs</li>
-   *   <li>delete by ID always succeeds (unless there are 0 documents)</li>
-   *   <li>deleteByQuery never matches unless the query is <code>*:*</code></li>
-   * </ul></p>
-   * @param req
-   * @return
-   * @throws SolrException
+   *   <li>delete by ID always succeeds (unless numDocs == 0)</li>
+   *   <li>deleteByQuery is not supported unless the query is <code>*:*</code></li>
+   * </ul>
+   * @param req update request. This request MUST have the <code>collection</code> param set.
+   * @return {@link UpdateResponse}
+   * @throws SolrException on errors, such as nonexistent collection or unsupported deleteByQuery
    */
   public UpdateResponse simUpdate(UpdateRequest req) throws SolrException, InterruptedException, IOException {
     String collection = req.getCollection();
@@ -1028,7 +1034,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     if (!simListCollections().contains(collection)) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection '" + collection + "' doesn't exist");
     }
-    // always reset first to get the current metrics
+    // always reset first to get the current metrics - it's easier than to keep matching
+    // Replica with ReplicaInfo where the current real counts are stored
     collectionsStatesRef.set(null);
     DocCollection coll = getClusterState().getCollection(collection);
     DocRouter router = coll.getRouter();
@@ -1041,6 +1048,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       if (deletes != null && !deletes.isEmpty()) {
         for (String id : deletes) {
           Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
+          // NOTE: we don't use getProperty because it uses PROPERTY_PROP_PREFIX
           String numDocsStr = s.getLeader().getStr("SEARCHER.searcher.numDocs");
           if (numDocsStr == null) {
             LOG.debug("-- no docs in " + s.getLeader());
@@ -1100,6 +1108,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
             simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 1, true, false);
             simSetShardValue(collection, s.getName(), "SEARCHER.searcher.maxDoc", 1, true, false);
             // Policy reuses this value and expects it to be in GB units!!!
+            // the idea here is to increase the index size by 500 bytes with each doc
             // simSetShardValue(collection, s.getName(), "INDEX.sizeInBytes", 500, true, false);
           } catch (Exception e) {
             throw new IOException(e);
@@ -1391,6 +1400,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     return state;
   }
 
+  // this method uses a simple cache in collectionsStatesRef. Operations that modify
+  // cluster state should always reset this cache so that the changes become visible
   private Map<String, DocCollection> getCollectionStates() {
     Map<String, DocCollection> collectionStates = collectionsStatesRef.get();
     if (collectionStates != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
index 1c56b74..757e297 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
@@ -22,15 +22,9 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Predicate;
 
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
index 2c4d8d3..129f18c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
@@ -592,7 +592,19 @@ public class TestLargeCluster extends SimSolrCloudTestCase {
     ops.forEach(op -> {
       assertEquals(CollectionParams.CollectionAction.ADDREPLICA, op.getAction());
       assertEquals(1, op.getHints().size());
-      Pair<String, String> hint = (Pair<String, String>)op.getHints().get(Suggester.Hint.COLL_SHARD);
+      Object o = op.getHints().get(Suggester.Hint.COLL_SHARD);
+      // this may be a pair or a HashSet of pairs with size 1
+      Pair<String, String> hint = null;
+      if (o instanceof Pair) {
+        hint = (Pair<String, String>)o;
+      } else if (o instanceof Set) {
+        assertEquals("unexpected number of hints: " + o, 1, ((Set)o).size());
+        o = ((Set)o).iterator().next();
+        assertTrue("unexpected hint: " + o, o instanceof Pair);
+        hint = (Pair<String, String>)o;
+      } else {
+        fail("unexpected hints: " + o);
+      }
       assertNotNull(hint);
       assertEquals(collectionName, hint.first());
       assertEquals("shard1", hint.second());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/751987d5/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java
index dc371d2..2a42d27 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java
@@ -24,7 +24,7 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.util.Pair;
 
 /**
- * This suggester produces a SPLITSHARD request using provided {@link Hint#COLL_SHARD} value.
+ * This suggester produces a SPLITSHARD request using provided {@link org.apache.solr.client.solrj.cloud.autoscaling.Suggester.Hint#COLL_SHARD} value.
  */
 class SplitShardSuggester extends Suggester {
 


[27/50] lucene-solr:jira/solr-12181: SOLR-12175: Add random field type and dynamic field to the default managed-schema

Posted by ab...@apache.org.
SOLR-12175: Add random field type and dynamic field to the default managed-schema


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d420139c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d420139c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d420139c

Branch: refs/heads/jira/solr-12181
Commit: d420139c27013ddd8c5aab9dea79dab09736e869
Parents: f1d6911
Author: Joel Bernstein <jb...@apache.org>
Authored: Tue Apr 3 16:25:59 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Apr 5 14:00:07 2018 -0400

----------------------------------------------------------------------
 solr/server/solr/configsets/_default/conf/managed-schema | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d420139c/solr/server/solr/configsets/_default/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/_default/conf/managed-schema b/solr/server/solr/configsets/_default/conf/managed-schema
index 4168a83..6f4e2ef 100644
--- a/solr/server/solr/configsets/_default/conf/managed-schema
+++ b/solr/server/solr/configsets/_default/conf/managed-schema
@@ -139,6 +139,7 @@
     <dynamicField name="*_fs" type="pfloats"  indexed="true"  stored="true"/>
     <dynamicField name="*_d"  type="pdouble" indexed="true"  stored="true"/>
     <dynamicField name="*_ds" type="pdoubles" indexed="true"  stored="true"/>
+    <dynamicField name="random_*" type="random"/>
 
     <!-- Type used for data-driven schema, to add a string copy for each text field -->
     <dynamicField name="*_str" type="strings" stored="false" docValues="true" indexed="false" />
@@ -211,6 +212,8 @@
     <fieldType name="pfloats" class="solr.FloatPointField" docValues="true" multiValued="true"/>
     <fieldType name="plongs" class="solr.LongPointField" docValues="true" multiValued="true"/>
     <fieldType name="pdoubles" class="solr.DoublePointField" docValues="true" multiValued="true"/>
+    <fieldType name="random" class="solr.RandomSortField" indexed="true"/>
+
 
     <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
          is a more restricted form of the canonical representation of dateTime

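With the random_* dynamic field available in the default configset, results can be ordered pseudo-randomly just by naming a matching field in the sort; the field suffix acts as the seed, so a different suffix yields a different (but repeatable) order. A SolrJ sketch, with the client and collection name assumed:

  import org.apache.solr.client.solrj.SolrQuery;

  SolrQuery q = new SolrQuery("*:*");
  // "random_1234" matches the new random_* dynamic field; "1234" seeds the order
  q.setSort("random_1234", SolrQuery.ORDER.asc);
  // QueryResponse rsp = solrClient.query("collection1", q);  // hypothetical client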

[17/50] lucene-solr:jira/solr-12181: SOLR-12134: CHANGES entry: ref-guide 'bare-bones html' validation is now part of 'ant documentation' and validates javadoc links locally

Posted by ab...@apache.org.
SOLR-12134: CHANGES entry: ref-guide 'bare-bones html' validation is now part of 'ant documentation' and validates javadoc links locally


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2573eac1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2573eac1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2573eac1

Branch: refs/heads/jira/solr-12181
Commit: 2573eac1c2cddaf8d818e5be02eef2dd7f4c178f
Parents: 27f4772
Author: Chris Hostetter <ho...@apache.org>
Authored: Thu Apr 5 10:10:20 2018 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Thu Apr 5 10:10:20 2018 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2573eac1/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 6a41c8d..09c330b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -168,6 +168,9 @@ Other Changes
 * SOLR-12176: Improve FORCELEADER to handle the case when a replica win the election but does not present
   in clusterstate (Cao Manh Dat)
 
+* SOLR-12134: ref-guide 'bare-bones html' validation is now part of 'ant documentation' and validates
+  javadoc links locally. (hossman)
+
 ==================  7.3.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


[44/50] lucene-solr:jira/solr-12181: Disable rewrite optimizations that can make the test fail because of floating-point rounding errors.

Posted by ab...@apache.org.
Disable rewrite optimizations that can make the test fail because of floating-point rounding errors.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b82f5912
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b82f5912
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b82f5912

Branch: refs/heads/jira/solr-12181
Commit: b82f5912a05ceffd28cf2a600c701e2fb387014d
Parents: 6568f3b
Author: Adrien Grand <jp...@gmail.com>
Authored: Mon Apr 9 10:22:23 2018 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Mon Apr 9 10:22:53 2018 +0200

----------------------------------------------------------------------
 .../TestApproximationSearchEquivalence.java     | 30 ++++++++++++++++----
 1 file changed, 24 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b82f5912/lucene/core/src/test/org/apache/lucene/search/TestApproximationSearchEquivalence.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestApproximationSearchEquivalence.java b/lucene/core/src/test/org/apache/lucene/search/TestApproximationSearchEquivalence.java
index a4926fc..89fc12a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestApproximationSearchEquivalence.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestApproximationSearchEquivalence.java
@@ -44,7 +44,10 @@ public class TestApproximationSearchEquivalence extends SearchEquivalenceTestBas
 
   public void testNestedConjunction() throws Exception {
     Term t1 = randomTerm();
-    Term t2 = randomTerm();
+    Term t2;
+    do {
+      t2 = randomTerm();
+    } while (t1.equals(t2));
     Term t3 = randomTerm();
     TermQuery q1 = new TermQuery(t1);
     TermQuery q2 = new TermQuery(t2);
@@ -88,7 +91,10 @@ public class TestApproximationSearchEquivalence extends SearchEquivalenceTestBas
 
   public void testNestedDisjunction() throws Exception {
     Term t1 = randomTerm();
-    Term t2 = randomTerm();
+    Term t2;
+    do {
+      t2 = randomTerm();
+    } while (t1.equals(t2));
     Term t3 = randomTerm();
     TermQuery q1 = new TermQuery(t1);
     TermQuery q2 = new TermQuery(t2);
@@ -115,7 +121,10 @@ public class TestApproximationSearchEquivalence extends SearchEquivalenceTestBas
 
   public void testDisjunctionInConjunction() throws Exception {
     Term t1 = randomTerm();
-    Term t2 = randomTerm();
+    Term t2;
+    do {
+      t2 = randomTerm();
+    } while (t1.equals(t2));
     Term t3 = randomTerm();
     TermQuery q1 = new TermQuery(t1);
     TermQuery q2 = new TermQuery(t2);
@@ -142,7 +151,10 @@ public class TestApproximationSearchEquivalence extends SearchEquivalenceTestBas
 
   public void testConjunctionInDisjunction() throws Exception {
     Term t1 = randomTerm();
-    Term t2 = randomTerm();
+    Term t2;
+    do {
+      t2 = randomTerm();
+    } while (t1.equals(t2));
     Term t3 = randomTerm();
     TermQuery q1 = new TermQuery(t1);
     TermQuery q2 = new TermQuery(t2);
@@ -203,7 +215,10 @@ public class TestApproximationSearchEquivalence extends SearchEquivalenceTestBas
 
   public void testNestedExclusion() throws Exception {
     Term t1 = randomTerm();
-    Term t2 = randomTerm();
+    Term t2;
+    do {
+      t2 = randomTerm();
+    } while (t1.equals(t2));
     Term t3 = randomTerm();
     TermQuery q1 = new TermQuery(t1);
     TermQuery q2 = new TermQuery(t2);
@@ -253,7 +268,10 @@ public class TestApproximationSearchEquivalence extends SearchEquivalenceTestBas
 
   public void testReqOpt() throws Exception {
     Term t1 = randomTerm();
-    Term t2 = randomTerm();
+    Term t2;
+    do {
+      t2 = randomTerm();
+    } while (t1.equals(t2));
     Term t3 = randomTerm();
     TermQuery q1 = new TermQuery(t1);
     TermQuery q2 = new TermQuery(t2);

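The same distinct-term loop now appears in each of these tests; a small helper (a sketch, not part of this commit) could express it once:

  // draw random terms until one differs from the given term
  private Term randomTermOtherThan(Term other) {
    Term t;
    do {
      t = randomTerm();
    } while (other.equals(t));
    return t;
  }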

[36/50] lucene-solr:jira/solr-12181: LUCENE-8226: Don't generate unnecessarily massive indexes for index vs query sorting test

Posted by ab...@apache.org.
LUCENE-8226: Don't generate unnecessarily massive indexes for index vs query sorting test


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/005da875
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/005da875
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/005da875

Branch: refs/heads/jira/solr-12181
Commit: 005da875211bc271257c1fb008a8355a3c1e9f3c
Parents: 0f53adb
Author: Alan Woodward <ro...@apache.org>
Authored: Sat Apr 7 18:29:14 2018 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Apr 7 18:29:14 2018 +0100

----------------------------------------------------------------------
 .../src/test/org/apache/lucene/index/TestIndexSorting.java    | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/005da875/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
index 6b43c16..3679d20 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
@@ -2222,12 +2222,7 @@ public class TestIndexSorting extends LuceneTestCase {
 
   // pits index time sorting against query time sorting
   public void testRandom3() throws Exception {
-    int numDocs;
-    if (TEST_NIGHTLY) {
-      numDocs = atLeast(100000);
-    } else {
-      numDocs = atLeast(1000);
-    }
+    int numDocs = atLeast(1000);
     List<RandomDoc> docs = new ArrayList<>();
 
     Sort sort = randomSort();


[24/50] lucene-solr:jira/solr-12181: SOLR-12183: Refactor Streaming Expression test cases

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80375acb/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
new file mode 100644
index 0000000..2afc74f
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
@@ -0,0 +1,3954 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.io.SolrClientCache;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
+import org.apache.solr.client.solrj.io.comp.FieldComparator;
+import org.apache.solr.client.solrj.io.eval.AddEvaluator;
+import org.apache.solr.client.solrj.io.eval.AndEvaluator;
+import org.apache.solr.client.solrj.io.eval.EqualToEvaluator;
+import org.apache.solr.client.solrj.io.eval.GreaterThanEqualToEvaluator;
+import org.apache.solr.client.solrj.io.eval.GreaterThanEvaluator;
+import org.apache.solr.client.solrj.io.eval.IfThenElseEvaluator;
+import org.apache.solr.client.solrj.io.eval.LessThanEqualToEvaluator;
+import org.apache.solr.client.solrj.io.eval.LessThanEvaluator;
+import org.apache.solr.client.solrj.io.eval.NotEvaluator;
+import org.apache.solr.client.solrj.io.eval.OrEvaluator;
+import org.apache.solr.client.solrj.io.eval.RawValueEvaluator;
+import org.apache.solr.client.solrj.io.ops.ConcatOperation;
+import org.apache.solr.client.solrj.io.ops.GroupOperation;
+import org.apache.solr.client.solrj.io.ops.ReplaceOperation;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParser;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.apache.solr.client.solrj.io.stream.metrics.CountMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.MaxMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.MeanMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.MinMetric;
+import org.apache.solr.client.solrj.io.stream.metrics.SumMetric;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+@LuceneTestCase.SuppressCodecs({"Lucene3x", "Lucene40","Lucene41","Lucene42","Lucene45"})
+public class StreamDecoratorTest extends SolrCloudTestCase {
+
+  private static final String COLLECTIONORALIAS = "collection1";
+  private static final int TIMEOUT = DEFAULT_TIMEOUT;
+  private static final String id = "id";
+
+  private static boolean useAlias;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(4)
+        .addConfig("conf", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
+        .addConfig("ml", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("ml").resolve("conf"))
+        .configure();
+
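+    // Randomly exercise both direct collection access and alias-based access.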
+    String collection;
+    useAlias = random().nextBoolean();
+    if (useAlias) {
+      collection = COLLECTIONORALIAS + "_collection";
+    } else {
+      collection = COLLECTIONORALIAS;
+    }
+
+    CollectionAdminRequest.createCollection(collection, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
+    if (useAlias) {
+      CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
+    }
+  }
+
+  @Before
+  public void cleanIndex() throws Exception {
+    new UpdateRequest()
+        .deleteByQuery("*:*")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+  }
+
+  @Test
+  public void testUniqueStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("unique", UniqueStream.class);
+
+    try {
+      // Basic test
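+      // Docs 0 and 2 share a_f=0, so unique over "a_f" keeps only the first of them, leaving 4 tuples.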
+      expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f\")");
+      stream = new UniqueStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 4);
+      assertOrder(tuples, 0, 1, 3, 4);
+
+      // Basic test desc
+      expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\"), over=\"a_f\")");
+      stream = new UniqueStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 4);
+      assertOrder(tuples, 4, 3, 1, 2);
+
+      // Basic w/multi comp
+      expression = StreamExpressionParser.parse("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")");
+      stream = new UniqueStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 5);
+      assertOrder(tuples, 0, 2, 1, 3, 4);
+
+      // full factory w/multi comp
+      stream = factory.constructStream("unique(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"), over=\"a_f, a_i\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 5);
+      assertOrder(tuples, 0, 2, 1, 3, 4);
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testSortStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .add(id, "5", "a_s", "hello1", "a_i", "1", "a_f", "2")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    try {
+      StreamFactory factory = new StreamFactory()
+          .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+          .withFunctionName("search", CloudSolrStream.class)
+          .withFunctionName("sort", SortStream.class);
+
+      // Basic test
+      stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 6);
+      assertOrder(tuples, 0, 1, 5, 2, 3, 4);
+
+      // Basic test desc
+      stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i desc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 6);
+      assertOrder(tuples, 4, 3, 2, 1, 5, 0);
+
+      // Basic w/multi comp
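+      // Docs 1 and 5 both have a_i=1; the secondary "a_f desc" sort places id 5 (a_f=2) before id 1 (a_f=1).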
+      stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc, a_f desc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 6);
+      assertOrder(tuples, 0, 5, 1, 2, 3, 4);
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testNullStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .add(id, "5", "a_s", "hello1", "a_i", "1", "a_f", "2")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("null", NullStream.class);
+
+    try {
+      // Basic test
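+      // null() exhausts the underlying stream and emits a single tuple whose nullCount is the number of tuples consumed.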
+      stream = factory.constructStream("null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assertTrue(tuples.size() == 1);
+      assertTrue(tuples.get(0).getLong("nullCount") == 6);
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testParallelNullStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .add(id, "5", "a_s", "hello1", "a_i", "1", "a_f", "2")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("null", NullStream.class)
+        .withFunctionName("parallel", ParallelStream.class);
+
+    try {
+
+      // Basic test
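+      // Each of the two workers runs null() over its partition and emits its own nullCount tuple; together they cover all 6 docs.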
+      stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"nullCount desc\", null(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), by=\"a_i asc\"))");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assertTrue(tuples.size() == 2);
+      long nullCount = 0;
+      for (Tuple t : tuples) {
+        nullCount += t.getLong("nullCount");
+      }
+
+      assertEquals(6L, nullCount);
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testMergeStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("unique", UniqueStream.class)
+      .withFunctionName("merge", MergeStream.class);
+    
+    // Basic test
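+    // merge() interleaves two already-sorted streams into a single stream ordered by the "on" clause.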
+    expression = StreamExpressionParser.parse("merge("
+        + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"),"
+        + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"),"
+        + "on=\"a_f asc\")");
+
+    stream = new MergeStream(expression, factory);
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    try {
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 4);
+      assertOrder(tuples, 0, 1, 3, 4);
+
+      // Basic test desc
+      expression = StreamExpressionParser.parse("merge("
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\"),"
+          + "on=\"a_f desc\")");
+      stream = new MergeStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 4);
+      assertOrder(tuples, 4, 3, 1, 0);
+
+      // Basic w/multi comp
+      expression = StreamExpressionParser.parse("merge("
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
+          + "on=\"a_f asc, a_s asc\")");
+      stream = new MergeStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 5);
+      assertOrder(tuples, 0, 2, 1, 3, 4);
+
+      // full factory w/multi comp
+      stream = factory.constructStream("merge("
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 3 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1 2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
+          + "on=\"a_f asc, a_s asc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 5);
+      assertOrder(tuples, 0, 2, 1, 3, 4);
+
+      // full factory w/multi streams
+      stream = factory.constructStream("merge("
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(0 4)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(1)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"id:(2)\", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_s asc\"),"
+          + "on=\"a_f asc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 4);
+      assertOrder(tuples, 0, 2, 1, 4);
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testRankStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("unique", UniqueStream.class)
+      .withFunctionName("top", RankStream.class);
+    try {
+      // Basic test
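+      // top(n=3, ...) keeps only the first 3 tuples according to its sort parameter.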
+      expression = StreamExpressionParser.parse("top("
+          + "n=3,"
+          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"),"
+          + "sort=\"a_f asc, a_i asc\")");
+      stream = new RankStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 3);
+      assertOrder(tuples, 0, 2, 1);
+
+      // Basic test desc
+      expression = StreamExpressionParser.parse("top("
+          + "n=2,"
+          + "unique("
+          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc\"),"
+          + "over=\"a_f\"),"
+          + "sort=\"a_f desc\")");
+      stream = new RankStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 2);
+      assertOrder(tuples, 4, 3);
+
+      // full factory
+      stream = factory.constructStream("top("
+          + "n=4,"
+          + "unique("
+          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\"),"
+          + "over=\"a_f\"),"
+          + "sort=\"a_f asc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 4);
+      assertOrder(tuples, 0, 1, 3, 4);
+
+      // full factory, switch order
+      stream = factory.constructStream("top("
+          + "n=4,"
+          + "unique("
+          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f desc, a_i desc\"),"
+          + "over=\"a_f\"),"
+          + "sort=\"a_f asc\")");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 4);
+      assertOrder(tuples, 2, 1, 3, 4);
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testReducerStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
+        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
+        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
+        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
+        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
+        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+    
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    Tuple t0, t1, t2;
+    List<Map> maps0, maps1, maps2;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("reduce", ReducerStream.class)
+        .withFunctionName("group", GroupOperation.class);
+
+    try {
+      // basic
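+      // reduce() folds contiguous tuples that share the "by" field (a_s) into one tuple; group() keeps up to n of them per group.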
+      expression = StreamExpressionParser.parse("reduce("
+          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f asc\"),"
+          + "by=\"a_s\","
+          + "group(sort=\"a_f desc\", n=\"4\"))");
+
+      stream = factory.constructStream(expression);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 3);
+
+      t0 = tuples.get(0);
+      maps0 = t0.getMaps("group");
+      assertMaps(maps0, 9, 1, 2, 0);
+
+      t1 = tuples.get(1);
+      maps1 = t1.getMaps("group");
+      assertMaps(maps1, 8, 7, 5, 3);
+
+
+      t2 = tuples.get(2);
+      maps2 = t2.getMaps("group");
+      assertMaps(maps2, 6, 4);
+
+      // basic w/spaces
+      expression = StreamExpressionParser.parse("reduce("
+          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc, a_f       asc\"),"
+          + "by=\"a_s\"," +
+          "group(sort=\"a_i asc\", n=\"2\"))");
+      stream = factory.constructStream(expression);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 3);
+
+      t0 = tuples.get(0);
+      maps0 = t0.getMaps("group");
+      assert (maps0.size() == 2);
+
+      assertMaps(maps0, 0, 1);
+
+      t1 = tuples.get(1);
+      maps1 = t1.getMaps("group");
+      assertMaps(maps1, 3, 5);
+
+      t2 = tuples.get(2);
+      maps2 = t2.getMaps("group");
+      assertMaps(maps2, 4, 6);
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testHavingStream() throws Exception {
+
+    SolrClientCache solrClientCache = new SolrClientCache();
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
+        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
+        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
+        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
+        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
+        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    TupleStream stream;
+    List<Tuple> tuples;
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("having", HavingStream.class)
+        .withFunctionName("rollup", RollupStream.class)
+        .withFunctionName("sum", SumMetric.class)
+        .withFunctionName("and", AndEvaluator.class)
+        .withFunctionName("or", OrEvaluator.class)
+        .withFunctionName("not", NotEvaluator.class)
+        .withFunctionName("gt", GreaterThanEvaluator.class)
+        .withFunctionName("lt", LessThanEvaluator.class)
+        .withFunctionName("eq", EqualToEvaluator.class)
+        .withFunctionName("lteq", LessThanEqualToEvaluator.class)
+        .withFunctionName("gteq", GreaterThanEqualToEvaluator.class);
+
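+    // having() passes through only those tuples for which the boolean evaluator returns true.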
+    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), eq(a_i, 9))");
+    StreamContext context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 1);
+    Tuple t = tuples.get(0);
+    assertTrue(t.getString("id").equals("9"));
+
+    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), and(eq(a_i, 9),lt(a_i, 10)))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 1);
+    t = tuples.get(0);
+    assertTrue(t.getString("id").equals("9"));
+
+    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), or(eq(a_i, 9),eq(a_i, 8)))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 2);
+    t = tuples.get(0);
+    assertTrue(t.getString("id").equals("8"));
+
+    t = tuples.get(1);
+    assertTrue(t.getString("id").equals("9"));
+
+
+    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), and(eq(a_i, 9),not(eq(a_i, 9))))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 0);
+
+    stream = factory.constructStream("having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), and(lteq(a_i, 9), gteq(a_i, 8)))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 2);
+
+    t = tuples.get(0);
+    assertTrue(t.getString("id").equals("8"));
+
+    t = tuples.get(1);
+    assertTrue(t.getString("id").equals("9"));
+
+    stream = factory.constructStream("having(rollup(over=a_f, sum(a_i), search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\")), and(eq(sum(a_i), 9),eq(sum(a_i), 9)))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 1);
+    t = tuples.get(0);
+    assertTrue(t.getDouble("a_f") == 10.0D);
+
+    solrClientCache.close();
+  }
+
+  @Test
+  public void testParallelHavingStream() throws Exception {
+
+    SolrClientCache solrClientCache = new SolrClientCache();
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
+        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
+        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
+        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
+        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
+        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    TupleStream stream;
+    List<Tuple> tuples;
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("having", HavingStream.class)
+        .withFunctionName("rollup", RollupStream.class)
+        .withFunctionName("sum", SumMetric.class)
+        .withFunctionName("and", AndEvaluator.class)
+        .withFunctionName("or", OrEvaluator.class)
+        .withFunctionName("not", NotEvaluator.class)
+        .withFunctionName("gt", GreaterThanEvaluator.class)
+        .withFunctionName("lt", LessThanEvaluator.class)
+        .withFunctionName("eq", EqualToEvaluator.class)
+        .withFunctionName("lteq", LessThanEqualToEvaluator.class)
+        .withFunctionName("gteq", GreaterThanEqualToEvaluator.class)
+        .withFunctionName("val", RawValueEvaluator.class)
+        .withFunctionName("parallel", ParallelStream.class);
+
+    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), eq(a_i, 9)))");
+    StreamContext context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 1);
+    Tuple t = tuples.get(0);
+    assertTrue(t.getString("id").equals("9"));
+
+    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), and(eq(a_i, 9),lt(a_i, 10))))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 1);
+    t = tuples.get(0);
+    assertTrue(t.getString("id").equals("9"));
+
+    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\",having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), or(eq(a_i, 9),eq(a_i, 8))))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 2);
+    t = tuples.get(0);
+    assertTrue(t.getString("id").equals("8"));
+
+    t = tuples.get(1);
+    assertTrue(t.getString("id").equals("9"));
+
+
+    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), and(eq(a_i, 9),not(eq(a_i, 9)))))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 0);
+
+
+    stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\",having(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=id), and(lteq(a_i, 9), gteq(a_i, 8))))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 2);
+
+    t = tuples.get(0);
+    assertTrue(t.getString("id").equals("8"));
+
+    t = tuples.get(1);
+    assertTrue(t.getString("id").equals("9"));
+
+    stream = factory.constructStream("parallel("+COLLECTIONORALIAS+", workers=2, sort=\"a_f asc\", having(rollup(over=a_f, sum(a_i), search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=a_f)), and(eq(sum(a_i), 9),eq(sum(a_i),9))))");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 1);
+
+    t = tuples.get(0);
+    assertTrue(t.getDouble("a_f") == 10.0D);
+
+    solrClientCache.close();
+  }
+
+  @Test
+  public void testFetchStream() throws Exception {
+
+    SolrClientCache solrClientCache = new SolrClientCache(); // TODO: share in @Before; close in @After?
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
+        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
+        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
+        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
+        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
+        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    TupleStream stream;
+    List<Tuple> tuples;
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("fetch", FetchStream.class);
+
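+    // fetch() decorates each tuple with extra fields (here "subject") joined from the collection via on="id=a_i", batchSize tuples per lookup.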
+    stream = factory.constructStream("fetch("+ COLLECTIONORALIAS +",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), on=\"id=a_i\", batchSize=\"2\", fl=\"subject\")");
+    StreamContext context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 10);
+    Tuple t = tuples.get(0);
+    assertTrue("blah blah blah 0".equals(t.getString("subject")));
+    t = tuples.get(1);
+    assertTrue("blah blah blah 2".equals(t.getString("subject")));
+    t = tuples.get(2);
+    assertTrue("blah blah blah 3".equals(t.getString("subject")));
+    t = tuples.get(3);
+    assertTrue("blah blah blah 4".equals(t.getString("subject")));
+    t = tuples.get(4);
+    assertTrue("blah blah blah 1".equals(t.getString("subject")));
+    t = tuples.get(5);
+    assertTrue("blah blah blah 5".equals(t.getString("subject")));
+    t = tuples.get(6);
+    assertTrue("blah blah blah 6".equals(t.getString("subject")));
+    t = tuples.get(7);
+    assertTrue("blah blah blah 7".equals(t.getString("subject")));
+    t = tuples.get(8);
+    assertTrue("blah blah blah 8".equals(t.getString("subject")));
+    t = tuples.get(9);
+    assertTrue("blah blah blah 9".equals(t.getString("subject")));
+
+    //Change the batch size
+    stream = factory.constructStream("fetch(" + COLLECTIONORALIAS + ",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), on=\"id=a_i\", batchSize=\"3\", fl=\"subject\")");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assert(tuples.size() == 10);
+    t = tuples.get(0);
+    assertTrue("blah blah blah 0".equals(t.getString("subject")));
+    t = tuples.get(1);
+    assertTrue("blah blah blah 2".equals(t.getString("subject")));
+    t = tuples.get(2);
+    assertTrue("blah blah blah 3".equals(t.getString("subject")));
+    t = tuples.get(3);
+    assertTrue("blah blah blah 4".equals(t.getString("subject")));
+    t = tuples.get(4);
+    assertTrue("blah blah blah 1".equals(t.getString("subject")));
+    t = tuples.get(5);
+    assertTrue("blah blah blah 5".equals(t.getString("subject")));
+    t = tuples.get(6);
+    assertTrue("blah blah blah 6".equals(t.getString("subject")));
+    t = tuples.get(7);
+    assertTrue("blah blah blah 7".equals(t.getString("subject")));
+    t = tuples.get(8);
+    assertTrue("blah blah blah 8".equals(t.getString("subject")));
+    t = tuples.get(9);
+    assertTrue("blah blah blah 9".equals(t.getString("subject")));
+
+    // SOLR-10404 test that "hello 99" as a value gets escaped
+    new UpdateRequest()
+        .add(id, "99", "a1_s", "hello 99", "a2_s", "hello 99", "subject", "blah blah blah 99")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    stream = factory.constructStream("fetch("+ COLLECTIONORALIAS +",  search(" + COLLECTIONORALIAS + ", q=" + id + ":99, fl=\"id,a1_s\", sort=\"id asc\"), on=\"a1_s=a2_s\", fl=\"subject\")");
+    context = new StreamContext();
+    context.setSolrClientCache(solrClientCache);
+    stream.setStreamContext(context);
+    tuples = getTuples(stream);
+
+    assertEquals(1, tuples.size());
+    t = tuples.get(0);
+    assertTrue("blah blah blah 99".equals(t.getString("subject")));
+
+    solrClientCache.close();
+  }
+
+  @Test
+  public void testParallelFetchStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "subject", "blah blah blah 0")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "subject", "blah blah blah 2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "subject", "blah blah blah 3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "subject", "blah blah blah 4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "subject", "blah blah blah 1")
+        .add(id, "5", "a_s", "hello3", "a_i", "5", "a_f", "6", "subject", "blah blah blah 5")
+        .add(id, "6", "a_s", "hello4", "a_i", "6", "a_f", "7", "subject", "blah blah blah 6")
+        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "8", "subject", "blah blah blah 7")
+        .add(id, "8", "a_s", "hello3", "a_i", "8", "a_f", "9", "subject", "blah blah blah 8")
+        .add(id, "9", "a_s", "hello0", "a_i", "9", "a_f", "10", "subject", "blah blah blah 9")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    TupleStream stream;
+    List<Tuple> tuples;
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("parallel", ParallelStream.class)
+        .withFunctionName("fetch", FetchStream.class);
+
+    try {
+
+      stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"2\", fl=\"subject\"))");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 10);
+      Tuple t = tuples.get(0);
+      assertTrue("blah blah blah 0".equals(t.getString("subject")));
+      t = tuples.get(1);
+      assertTrue("blah blah blah 2".equals(t.getString("subject")));
+      t = tuples.get(2);
+      assertTrue("blah blah blah 3".equals(t.getString("subject")));
+      t = tuples.get(3);
+      assertTrue("blah blah blah 4".equals(t.getString("subject")));
+      t = tuples.get(4);
+      assertTrue("blah blah blah 1".equals(t.getString("subject")));
+      t = tuples.get(5);
+      assertTrue("blah blah blah 5".equals(t.getString("subject")));
+      t = tuples.get(6);
+      assertTrue("blah blah blah 6".equals(t.getString("subject")));
+      t = tuples.get(7);
+      assertTrue("blah blah blah 7".equals(t.getString("subject")));
+      t = tuples.get(8);
+      assertTrue("blah blah blah 8".equals(t.getString("subject")));
+      t = tuples.get(9);
+      assertTrue("blah blah blah 9".equals(t.getString("subject")));
+
+
+      stream = factory.constructStream("parallel(" + COLLECTIONORALIAS + ", workers=2, sort=\"a_f asc\", fetch(" + COLLECTIONORALIAS + ",  search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\", partitionKeys=\"id\"), on=\"id=a_i\", batchSize=\"3\", fl=\"subject\"))");
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 10);
+      t = tuples.get(0);
+      assertTrue("blah blah blah 0".equals(t.getString("subject")));
+      t = tuples.get(1);
+      assertTrue("blah blah blah 2".equals(t.getString("subject")));
+      t = tuples.get(2);
+      assertTrue("blah blah blah 3".equals(t.getString("subject")));
+      t = tuples.get(3);
+      assertTrue("blah blah blah 4".equals(t.getString("subject")));
+      t = tuples.get(4);
+      assertTrue("blah blah blah 1".equals(t.getString("subject")));
+      t = tuples.get(5);
+      assertTrue("blah blah blah 5".equals(t.getString("subject")));
+      t = tuples.get(6);
+      assertTrue("blah blah blah 6".equals(t.getString("subject")));
+      t = tuples.get(7);
+      assertTrue("blah blah blah 7".equals(t.getString("subject")));
+      t = tuples.get(8);
+      assertTrue("blah blah blah 8".equals(t.getString("subject")));
+      t = tuples.get(9);
+      assertTrue("blah blah blah 9".equals(t.getString("subject")));
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testDaemonStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
+        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
+        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
+        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
+        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
+        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("rollup", RollupStream.class)
+        .withFunctionName("sum", SumMetric.class)
+        .withFunctionName("min", MinMetric.class)
+        .withFunctionName("max", MaxMetric.class)
+        .withFunctionName("avg", MeanMetric.class)
+        .withFunctionName("count", CountMetric.class)
+        .withFunctionName("daemon", DaemonStream.class);
+
+    StreamExpression expression;
+    DaemonStream daemonStream;
+
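+    // daemon() re-runs the wrapped rollup every runInterval (1000ms) and pushes each run's tuples onto a bounded queue (queueSize=9).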
+    expression = StreamExpressionParser.parse("daemon(rollup("
+        + "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"a_i,a_s\", sort=\"a_s asc\"),"
+        + "over=\"a_s\","
+        + "sum(a_i)"
+        + "), id=\"test\", runInterval=\"1000\", queueSize=\"9\")");
+    daemonStream = (DaemonStream)factory.constructStream(expression);
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    daemonStream.setStreamContext(streamContext);
+    try {
+      //Test Long and Double Sums
+
+      daemonStream.open(); // This will start the daemon thread
+
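+      // Each daemon run emits one tuple per a_s bucket: hello0, hello3 and hello4.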
+      for (int i = 0; i < 4; i++) {
+        Tuple tuple = daemonStream.read(); // Reads from the queue
+        String bucket = tuple.getString("a_s");
+        Double sumi = tuple.getDouble("sum(a_i)");
+
+        // hello0 bucket: a_i sum = 0 + 2 + 1 + 14 = 17
+        assertTrue(bucket.equals("hello0"));
+        assertTrue(sumi.doubleValue() == 17.0D);
+
+        tuple = daemonStream.read();
+        bucket = tuple.getString("a_s");
+        sumi = tuple.getDouble("sum(a_i)");
+
+        // hello3 bucket: a_i sum = 3 + 10 + 12 + 13 = 38
+        assertTrue(bucket.equals("hello3"));
+        assertTrue(sumi.doubleValue() == 38.0D);
+
+        tuple = daemonStream.read();
+        bucket = tuple.getString("a_s");
+        sumi = tuple.getDouble("sum(a_i)");
+        // hello4 bucket: a_i sum = 4 + 11 = 15
+        assertTrue(bucket.equals("hello4"));
+        assertTrue(sumi.longValue() == 15);
+      }
+
+      //Now let's wait until the internal queue fills up
+
+      while (daemonStream.remainingCapacity() > 0) {
+        try {
+          Thread.sleep(1000);
+        } catch (Exception e) {
+          // Interruptions are harmless here; just keep polling.
+        }
+      }
+
+      //OK capacity is full, let's index a new doc
+
+      new UpdateRequest()
+          .add(id, "10", "a_s", "hello0", "a_i", "1", "a_f", "10")
+          .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+      //Now let's clear the 9 docs already in the queue, plus 3 more to get past the run that was blocked. The next run should
+      //have the tuples with the updated sum.
+      for (int i = 0; i < 12; i++) {
+        daemonStream.read();
+      }
+
+      //And rerun the loop. It should now show the updated sum for hello0
+      for (int i = 0; i < 4; i++) {
+        Tuple tuple = daemonStream.read(); // Reads from the queue
+        String bucket = tuple.getString("a_s");
+        Double sumi = tuple.getDouble("sum(a_i)");
+
+        assertTrue(bucket.equals("hello0"));
+        assertTrue(sumi.doubleValue() == 18.0D);
+
+        tuple = daemonStream.read();
+        bucket = tuple.getString("a_s");
+        sumi = tuple.getDouble("sum(a_i)");
+
+        assertTrue(bucket.equals("hello3"));
+        assertTrue(sumi.doubleValue() == 38.0D);
+
+        tuple = daemonStream.read();
+        bucket = tuple.getString("a_s");
+        sumi = tuple.getDouble("sum(a_i)");
+        assertTrue(bucket.equals("hello4"));
+        assertTrue(sumi.longValue() == 15);
+      }
+    } finally {
+      daemonStream.close(); //This should stop the daemon thread
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testTerminatingDaemonStream() throws Exception {
+    Assume.assumeTrue(!useAlias);
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello", "a_i", "0", "a_f", "1")
+        .add(id, "2", "a_s", "hello", "a_i", "2", "a_f", "2")
+        .add(id, "3", "a_s", "hello", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello", "a_i", "1", "a_f", "5")
+        .add(id, "5", "a_s", "hello", "a_i", "10", "a_f", "6")
+        .add(id, "6", "a_s", "hello", "a_i", "11", "a_f", "7")
+        .add(id, "7", "a_s", "hello", "a_i", "12", "a_f", "8")
+        .add(id, "8", "a_s", "hello", "a_i", "13", "a_f", "9")
+        .add(id, "9", "a_s", "hello", "a_i", "14", "a_f", "10")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamFactory factory = new StreamFactory()
+        .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+        .withFunctionName("topic", TopicStream.class)
+        .withFunctionName("daemon", DaemonStream.class);
+
+    StreamExpression expression;
+    DaemonStream daemonStream;
+
+    SolrClientCache cache = new SolrClientCache();
+    StreamContext context = new StreamContext();
+    context.setSolrClientCache(cache);
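+    // With terminate=true the daemon stops itself once the wrapped topic stream returns no new documents, so all 10 ids can be drained below.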
+    expression = StreamExpressionParser.parse("daemon(topic("+ COLLECTIONORALIAS +","+ COLLECTIONORALIAS +", q=\"a_s:hello\", initialCheckpoint=0, id=\"topic1\", rows=2, fl=\"id\""
+        + "), id=test, runInterval=1000, terminate=true, queueSize=50)");
+    daemonStream = (DaemonStream)factory.constructStream(expression);
+    daemonStream.setStreamContext(context);
+
+    List<Tuple> tuples = getTuples(daemonStream);
+    assertTrue(tuples.size() == 10);
+    cache.close();
+  }
+
+  @Test
+  public void testRollupStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
+        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
+        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
+        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
+        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
+        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("rollup", RollupStream.class)
+      .withFunctionName("sum", SumMetric.class)
+      .withFunctionName("min", MinMetric.class)
+      .withFunctionName("max", MaxMetric.class)
+      .withFunctionName("avg", MeanMetric.class)
+      .withFunctionName("count", CountMetric.class);     
+    
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    try {
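+      // rollup() aggregates over buckets of a sorted stream; the input must be sorted by the "over" field (a_s).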
+      expression = StreamExpressionParser.parse("rollup("
+          + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"a_s,a_i,a_f\", sort=\"a_s asc\"),"
+          + "over=\"a_s\","
+          + "sum(a_i),"
+          + "sum(a_f),"
+          + "min(a_i),"
+          + "min(a_f),"
+          + "max(a_i),"
+          + "max(a_f),"
+          + "avg(a_i),"
+          + "avg(a_f),"
+          + "count(*),"
+          + ")");
+      stream = factory.constructStream(expression);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 3);
+
+      //Test Long and Double Sums
+
+      Tuple tuple = tuples.get(0);
+      String bucket = tuple.getString("a_s");
+      Double sumi = tuple.getDouble("sum(a_i)");
+      Double sumf = tuple.getDouble("sum(a_f)");
+      Double mini = tuple.getDouble("min(a_i)");
+      Double minf = tuple.getDouble("min(a_f)");
+      Double maxi = tuple.getDouble("max(a_i)");
+      Double maxf = tuple.getDouble("max(a_f)");
+      Double avgi = tuple.getDouble("avg(a_i)");
+      Double avgf = tuple.getDouble("avg(a_f)");
+      Double count = tuple.getDouble("count(*)");
+
+      assertTrue(bucket.equals("hello0"));
+      assertTrue(sumi.doubleValue() == 17.0D);
+      assertTrue(sumf.doubleValue() == 18.0D);
+      assertTrue(mini.doubleValue() == 0.0D);
+      assertTrue(minf.doubleValue() == 1.0D);
+      assertTrue(maxi.doubleValue() == 14.0D);
+      assertTrue(maxf.doubleValue() == 10.0D);
+      assertTrue(avgi.doubleValue() == 4.25D);
+      assertTrue(avgf.doubleValue() == 4.5D);
+      assertTrue(count.doubleValue() == 4);
+
+      tuple = tuples.get(1);
+      bucket = tuple.getString("a_s");
+      sumi = tuple.getDouble("sum(a_i)");
+      sumf = tuple.getDouble("sum(a_f)");
+      mini = tuple.getDouble("min(a_i)");
+      minf = tuple.getDouble("min(a_f)");
+      maxi = tuple.getDouble("max(a_i)");
+      maxf = tuple.getDouble("max(a_f)");
+      avgi = tuple.getDouble("avg(a_i)");
+      avgf = tuple.getDouble("avg(a_f)");
+      count = tuple.getDouble("count(*)");
+
+      assertTrue(bucket.equals("hello3"));
+      assertTrue(sumi.doubleValue() == 38.0D);
+      assertTrue(sumf.doubleValue() == 26.0D);
+      assertTrue(mini.doubleValue() == 3.0D);
+      assertTrue(minf.doubleValue() == 3.0D);
+      assertTrue(maxi.doubleValue() == 13.0D);
+      assertTrue(maxf.doubleValue() == 9.0D);
+      assertTrue(avgi.doubleValue() == 9.5D);
+      assertTrue(avgf.doubleValue() == 6.5D);
+      assertTrue(count.doubleValue() == 4);
+
+      tuple = tuples.get(2);
+      bucket = tuple.getString("a_s");
+      sumi = tuple.getDouble("sum(a_i)");
+      sumf = tuple.getDouble("sum(a_f)");
+      mini = tuple.getDouble("min(a_i)");
+      minf = tuple.getDouble("min(a_f)");
+      maxi = tuple.getDouble("max(a_i)");
+      maxf = tuple.getDouble("max(a_f)");
+      avgi = tuple.getDouble("avg(a_i)");
+      avgf = tuple.getDouble("avg(a_f)");
+      count = tuple.getDouble("count(*)");
+
+      assertTrue(bucket.equals("hello4"));
+      assertTrue(sumi.longValue() == 15);
+      assertTrue(sumf.doubleValue() == 11.0D);
+      assertTrue(mini.doubleValue() == 4.0D);
+      assertTrue(minf.doubleValue() == 4.0D);
+      assertTrue(maxi.doubleValue() == 11.0D);
+      assertTrue(maxf.doubleValue() == 7.0D);
+      assertTrue(avgi.doubleValue() == 7.5D);
+      assertTrue(avgf.doubleValue() == 5.5D);
+      assertTrue(count.doubleValue() == 2);
+
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testParallelUniqueStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .add(id, "5", "a_s", "hello1", "a_i", "10", "a_f", "1")
+        .add(id, "6", "a_s", "hello1", "a_i", "11", "a_f", "5")
+        .add(id, "7", "a_s", "hello1", "a_i", "12", "a_f", "5")
+        .add(id, "8", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String zkHost = cluster.getZkServer().getZkAddress();
+    StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost)
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("unique", UniqueStream.class)
+        .withFunctionName("top", RankStream.class)
+        .withFunctionName("group", ReducerStream.class)
+        .withFunctionName("parallel", ParallelStream.class);
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    try {
+
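+      // partitionKeys="a_f" routes tuples with equal a_f values to the same worker, so each worker can de-duplicate its partition independently.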
+      ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")");
+      pstream.setStreamContext(streamContext);
+      List<Tuple> tuples = getTuples(pstream);
+      assert (tuples.size() == 5);
+      assertOrder(tuples, 0, 1, 3, 4, 6);
+
+      //Test the eofTuples
+
+      Map<String, Tuple> eofTuples = pstream.getEofTuples();
+      assert (eofTuples.size() == 2); //There should be an EOF tuple for each worker.
+    } finally {
+      solrClientCache.close();
+    }
+  }
+
+  @Test
+  public void testParallelShuffleStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .add(id, "5", "a_s", "hello1", "a_i", "10", "a_f", "1")
+        .add(id, "6", "a_s", "hello1", "a_i", "11", "a_f", "5")
+        .add(id, "7", "a_s", "hello1", "a_i", "12", "a_f", "5")
+        .add(id, "8", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "9", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "10", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "11", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "12", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "13", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "14", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "15", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "16", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "17", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "18", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "19", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "20", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "21", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "22", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "23", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "24", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "25", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "26", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "27", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "28", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "29", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "30", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "31", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "32", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "33", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "34", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "35", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "36", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "37", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "38", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "39", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "40", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "41", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "42", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "43", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "44", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "45", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "46", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "47", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "48", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "49", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "50", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "51", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "52", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "53", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "54", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "55", "a_s", "hello1", "a_i", "13", "a_f", "4")
+        .add(id, "56", "a_s", "hello1", "a_i", "13", "a_f", "1000")
+
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    String zkHost = cluster.getZkServer().getZkAddress();
+    StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost)
+        .withFunctionName("shuffle", ShuffleStream.class)
+        .withFunctionName("unique", UniqueStream.class)
+        .withFunctionName("parallel", ParallelStream.class);
+
+    try {
+      ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(shuffle(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")");
+      pstream.setStreamFactory(streamFactory);
+      pstream.setStreamContext(streamContext);
+      List<Tuple> tuples = getTuples(pstream);
+      assert (tuples.size() == 6);
+      assertOrder(tuples, 0, 1, 3, 4, 6, 56);
+
+      //Test the eofTuples
+
+      Map<String, Tuple> eofTuples = pstream.getEofTuples();
+      assert (eofTuples.size() == 2); //There should be an EOF tuple for each worker.
+      assert (pstream.toExpression(streamFactory).toString().contains("shuffle"));
+    } finally {
+      solrClientCache.close();
+    }
+  }
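+
+  // A sketch of the shape exercised above (collection and field names are
+  // the ones indexed in this test): parallel() pushes the wrapped expression
+  // to N workers, and partitionKeys must cover the unique() 'over' field so
+  // that tuples sharing an a_f value hash to the same worker:
+  //
+  //   parallel(collection1,
+  //            unique(shuffle(collection1, q=*:*, fl="id,a_s,a_i,a_f",
+  //                           sort="a_f asc, a_i asc", partitionKeys="a_f"),
+  //                   over="a_f"),
+  //            workers="2", zkHost="...", sort="a_f asc")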
+
+  @Test
+  public void testParallelReducerStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
+        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
+        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
+        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
+        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
+        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+
+    String zkHost = cluster.getZkServer().getZkAddress();
+    StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost)
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("group", GroupOperation.class)
+        .withFunctionName("reduce", ReducerStream.class)
+        .withFunctionName("parallel", ParallelStream.class);
+
+
+    try {
+      ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", " +
+          "reduce(" +
+          "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i,a_f\", sort=\"a_s asc,a_f asc\", partitionKeys=\"a_s\"), " +
+          "by=\"a_s\"," +
+          "group(sort=\"a_i asc\", n=\"5\")), " +
+          "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_s asc\")");
+
+      pstream.setStreamContext(streamContext);
+
+      List<Tuple> tuples = getTuples(pstream);
+
+      assert (tuples.size() == 3);
+
+      Tuple t0 = tuples.get(0);
+      List<Map> maps0 = t0.getMaps("group");
+      assertMaps(maps0, 0, 1, 2, 9);
+
+      Tuple t1 = tuples.get(1);
+      List<Map> maps1 = t1.getMaps("group");
+      assertMaps(maps1, 3, 5, 7, 8);
+
+      Tuple t2 = tuples.get(2);
+      List<Map> maps2 = t2.getMaps("group");
+      assertMaps(maps2, 4, 6);
+
+
+      pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", " +
+          "reduce(" +
+          "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i,a_f\", sort=\"a_s desc,a_f asc\", partitionKeys=\"a_s\"), " +
+          "by=\"a_s\", " +
+          "group(sort=\"a_i desc\", n=\"5\"))," +
+          "workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_s desc\")");
+
+      pstream.setStreamContext(streamContext);
+      tuples = getTuples(pstream);
+
+      assert (tuples.size() == 3);
+
+      t0 = tuples.get(0);
+      maps0 = t0.getMaps("group");
+      assertMaps(maps0, 6, 4);
+
+
+      t1 = tuples.get(1);
+      maps1 = t1.getMaps("group");
+      assertMaps(maps1, 8, 7, 5, 3);
+
+
+      t2 = tuples.get(2);
+      maps2 = t2.getMaps("group");
+      assertMaps(maps2, 9, 2, 1, 0);
+    } finally {
+      solrClientCache.close();
+    }
+
+  }
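+
+  // reduce() assumes its inner stream arrives sorted by the 'by' field and
+  // emits one tuple per distinct key, carrying the list built by the group()
+  // operation. A minimal sketch of the tested shape:
+  //
+  //   reduce(search(collection1, q="*:*", fl="id,a_s,a_i,a_f",
+  //                 sort="a_s asc,a_f asc", partitionKeys="a_s"),
+  //          by="a_s",
+  //          group(sort="a_i asc", n="5"))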
+
+  @Test
+  public void testParallelRankStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "5", "a_s", "hello1", "a_i", "5", "a_f", "1")
+        .add(id, "6", "a_s", "hello1", "a_i", "6", "a_f", "1")
+        .add(id, "7", "a_s", "hello1", "a_i", "7", "a_f", "1")
+        .add(id, "8", "a_s", "hello1", "a_i", "8", "a_f", "1")
+        .add(id, "9", "a_s", "hello1", "a_i", "9", "a_f", "1")
+        .add(id, "10", "a_s", "hello1", "a_i", "10", "a_f", "1")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String zkHost = cluster.getZkServer().getZkAddress();
+    StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost)
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("unique", UniqueStream.class)
+        .withFunctionName("top", RankStream.class)
+        .withFunctionName("group", ReducerStream.class)
+        .withFunctionName("parallel", ParallelStream.class);
+
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    try {
+      ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel("
+          + COLLECTIONORALIAS + ", "
+          + "top("
+          + "search(" + COLLECTIONORALIAS + ", q=\"*:*\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), "
+          + "n=\"11\", "
+          + "sort=\"a_i desc\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i desc\")");
+      pstream.setStreamContext(streamContext);
+      List<Tuple> tuples = getTuples(pstream);
+
+      assert (tuples.size() == 10);
+      assertOrder(tuples, 10, 9, 8, 7, 6, 5, 4, 3, 2, 0);
+    } finally {
+      solrClientCache.close();
+    }
+
+  }
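+
+  // top() (RankStream) maintains an n-element priority queue over its inner
+  // stream; under parallel() each worker ranks its own partition and the
+  // outer sort re-merges the per-worker results. Sketch:
+  //
+  //   parallel(collection1,
+  //            top(search(collection1, q="*:*", fl="id,a_s,a_i",
+  //                       sort="a_i asc", partitionKeys="a_i"),
+  //                n="11", sort="a_i desc"),
+  //            workers="2", zkHost="...", sort="a_i desc")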
+
+  @Test
+  public void testParallelMergeStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
+        .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
+        .add(id, "5", "a_s", "hello0", "a_i", "10", "a_f", "0")
+        .add(id, "6", "a_s", "hello2", "a_i", "8", "a_f", "0")
+        .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "3")
+        .add(id, "8", "a_s", "hello4", "a_i", "11", "a_f", "4")
+        .add(id, "9", "a_s", "hello1", "a_i", "100", "a_f", "1")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String zkHost = cluster.getZkServer().getZkAddress();
+    StreamFactory streamFactory = new StreamFactory().withCollectionZkHost(COLLECTIONORALIAS, zkHost)
+        .withFunctionName("search", CloudSolrStream.class)
+        .withFunctionName("unique", UniqueStream.class)
+        .withFunctionName("top", RankStream.class)
+        .withFunctionName("group", ReducerStream.class)
+        .withFunctionName("merge", MergeStream.class)
+        .withFunctionName("parallel", ParallelStream.class);
+
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    try {
+      //Test ascending
+      ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", merge(search(" + COLLECTIONORALIAS + ", q=\"id:(4 1 8 7 9)\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), search(" + COLLECTIONORALIAS + ", q=\"id:(0 2 3 6)\", fl=\"id,a_s,a_i\", sort=\"a_i asc\", partitionKeys=\"a_i\"), on=\"a_i asc\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i asc\")");
+      pstream.setStreamContext(streamContext);
+      List<Tuple> tuples = getTuples(pstream);
+
+      assert (tuples.size() == 9);
+      assertOrder(tuples, 0, 1, 2, 3, 4, 7, 6, 8, 9);
+
+      //Test descending
+
+      pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", merge(search(" + COLLECTIONORALIAS + ", q=\"id:(4 1 8 9)\", fl=\"id,a_s,a_i\", sort=\"a_i desc\", partitionKeys=\"a_i\"), search(" + COLLECTIONORALIAS + ", q=\"id:(0 2 3 6)\", fl=\"id,a_s,a_i\", sort=\"a_i desc\", partitionKeys=\"a_i\"), on=\"a_i desc\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_i desc\")");
+      pstream.setStreamContext(streamContext);
+      tuples = getTuples(pstream);
+
+      assert (tuples.size() == 8);
+      assertOrder(tuples, 9, 8, 6, 4, 3, 2, 1, 0);
+    } finally {
+      solrClientCache.close();
+    }
+
+  }
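+
+  // merge() is an ordered merge of streams that are already sorted by the
+  // 'on' criteria, so the inner sorts and the 'on' clause must agree:
+  //
+  //   merge(search(collection1, q="id:(4 1 8 7 9)", sort="a_i asc", partitionKeys="a_i"),
+  //         search(collection1, q="id:(0 2 3 6)", sort="a_i asc", partitionKeys="a_i"),
+  //         on="a_i asc")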
+
+  @Test
+  public void testParallelRollupStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
+        .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
+        .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
+        .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
+        .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
+        .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
+        .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
+        .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
+        .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
+        .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("parallel", ParallelStream.class)
+      .withFunctionName("rollup", RollupStream.class)
+      .withFunctionName("sum", SumMetric.class)
+      .withFunctionName("min", MinMetric.class)
+      .withFunctionName("max", MaxMetric.class)
+      .withFunctionName("avg", MeanMetric.class)
+      .withFunctionName("count", CountMetric.class);
+
+
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+
+    try {
+      expression = StreamExpressionParser.parse("parallel(" + COLLECTIONORALIAS + ","
+              + "rollup("
+              + "search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"a_s,a_i,a_f\", sort=\"a_s asc\", partitionKeys=\"a_s\"),"
+              + "over=\"a_s\","
+              + "sum(a_i),"
+              + "sum(a_f),"
+              + "min(a_i),"
+              + "min(a_f),"
+              + "max(a_i),"
+              + "max(a_f),"
+              + "avg(a_i),"
+              + "avg(a_f),"
+              + "count(*)"
+              + "),"
+              + "workers=\"2\", zkHost=\"" + cluster.getZkServer().getZkAddress() + "\", sort=\"a_s asc\")"
+      );
+
+
+      stream = factory.constructStream(expression);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 3);
+
+      //Test Long and Double Sums
+
+      Tuple tuple = tuples.get(0);
+      String bucket = tuple.getString("a_s");
+      Double sumi = tuple.getDouble("sum(a_i)");
+      Double sumf = tuple.getDouble("sum(a_f)");
+      Double mini = tuple.getDouble("min(a_i)");
+      Double minf = tuple.getDouble("min(a_f)");
+      Double maxi = tuple.getDouble("max(a_i)");
+      Double maxf = tuple.getDouble("max(a_f)");
+      Double avgi = tuple.getDouble("avg(a_i)");
+      Double avgf = tuple.getDouble("avg(a_f)");
+      Double count = tuple.getDouble("count(*)");
+
+      assertTrue(bucket.equals("hello0"));
+      assertTrue(sumi.doubleValue() == 17.0D);
+      assertTrue(sumf.doubleValue() == 18.0D);
+      assertTrue(mini.doubleValue() == 0.0D);
+      assertTrue(minf.doubleValue() == 1.0D);
+      assertTrue(maxi.doubleValue() == 14.0D);
+      assertTrue(maxf.doubleValue() == 10.0D);
+      assertTrue(avgi.doubleValue() == 4.25D);
+      assertTrue(avgf.doubleValue() == 4.5D);
+      assertTrue(count.doubleValue() == 4);
+
+      tuple = tuples.get(1);
+      bucket = tuple.getString("a_s");
+      sumi = tuple.getDouble("sum(a_i)");
+      sumf = tuple.getDouble("sum(a_f)");
+      mini = tuple.getDouble("min(a_i)");
+      minf = tuple.getDouble("min(a_f)");
+      maxi = tuple.getDouble("max(a_i)");
+      maxf = tuple.getDouble("max(a_f)");
+      avgi = tuple.getDouble("avg(a_i)");
+      avgf = tuple.getDouble("avg(a_f)");
+      count = tuple.getDouble("count(*)");
+
+      assertTrue(bucket.equals("hello3"));
+      assertTrue(sumi.doubleValue() == 38.0D);
+      assertTrue(sumf.doubleValue() == 26.0D);
+      assertTrue(mini.doubleValue() == 3.0D);
+      assertTrue(minf.doubleValue() == 3.0D);
+      assertTrue(maxi.doubleValue() == 13.0D);
+      assertTrue(maxf.doubleValue() == 9.0D);
+      assertTrue(avgi.doubleValue() == 9.5D);
+      assertTrue(avgf.doubleValue() == 6.5D);
+      assertTrue(count.doubleValue() == 4);
+
+      tuple = tuples.get(2);
+      bucket = tuple.getString("a_s");
+      sumi = tuple.getDouble("sum(a_i)");
+      sumf = tuple.getDouble("sum(a_f)");
+      mini = tuple.getDouble("min(a_i)");
+      minf = tuple.getDouble("min(a_f)");
+      maxi = tuple.getDouble("max(a_i)");
+      maxf = tuple.getDouble("max(a_f)");
+      avgi = tuple.getDouble("avg(a_i)");
+      avgf = tuple.getDouble("avg(a_f)");
+      count = tuple.getDouble("count(*)");
+
+      assertTrue(bucket.equals("hello4"));
+      assertTrue(sumi.longValue() == 15);
+      assertTrue(sumf.doubleValue() == 11.0D);
+      assertTrue(mini.doubleValue() == 4.0D);
+      assertTrue(minf.doubleValue() == 4.0D);
+      assertTrue(maxi.doubleValue() == 11.0D);
+      assertTrue(maxf.doubleValue() == 7.0D);
+      assertTrue(avgi.doubleValue() == 7.5D);
+      assertTrue(avgf.doubleValue() == 5.5D);
+      assertTrue(count.doubleValue() == 2);
+    } finally {
+      solrClientCache.close();
+    }
+  }
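+
+  // rollup() computes its metrics per 'over' bucket as the sorted stream
+  // goes by. For the data above the hello0 bucket holds docs 0, 2, 1 and 9,
+  // so as a sanity check of the assertions:
+  //
+  //   sum(a_i) = 0 + 2 + 1 + 14 = 17     avg(a_i) = 17 / 4 = 4.25
+  //   sum(a_f) = 1 + 2 + 5 + 10 = 18     avg(a_f) = 18 / 4 = 4.5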
+
+  @Test
+  public void testInnerJoinStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "1", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9
+        .add(id, "15", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9
+        .add(id, "2", "side_s", "left", "join1_i", "0", "join2_s", "b", "ident_s", "left_2")
+        .add(id, "3", "side_s", "left", "join1_i", "1", "join2_s", "a", "ident_s", "left_3") // 10
+        .add(id, "4", "side_s", "left", "join1_i", "1", "join2_s", "b", "ident_s", "left_4") // 11
+        .add(id, "5", "side_s", "left", "join1_i", "1", "join2_s", "c", "ident_s", "left_5") // 12
+        .add(id, "6", "side_s", "left", "join1_i", "2", "join2_s", "d", "ident_s", "left_6")
+        .add(id, "7", "side_s", "left", "join1_i", "3", "join2_s", "e", "ident_s", "left_7") // 14
+
+        .add(id, "8", "side_s", "right", "join1_i", "0", "join2_s", "a", "ident_s", "right_1", "join3_i", "0") // 1,15
+        .add(id, "9", "side_s", "right", "join1_i", "0", "join2_s", "a", "ident_s", "right_2", "join3_i", "0") // 1,15
+        .add(id, "10", "side_s", "right", "join1_i", "1", "join2_s", "a", "ident_s", "right_3", "join3_i", "1") // 3
+        .add(id, "11", "side_s", "right", "join1_i", "1", "join2_s", "b", "ident_s", "right_4", "join3_i", "1") // 4
+        .add(id, "12", "side_s", "right", "join1_i", "1", "join2_s", "c", "ident_s", "right_5", "join3_i", "1") // 5
+        .add(id, "13", "side_s", "right", "join1_i", "2", "join2_s", "dad", "ident_s", "right_6", "join3_i", "2")
+        .add(id, "14", "side_s", "right", "join1_i", "3", "join2_s", "e", "ident_s", "right_7", "join3_i", "3") // 7
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("innerJoin", InnerJoinStream.class);
+
+    try {
+      // Basic test
+      expression = StreamExpressionParser.parse("innerJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\"),"
+          + "on=\"join1_i=join1_i, join2_s=join2_s\")");
+      stream = new InnerJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 8);
+      assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7);
+
+      // Basic desc
+      expression = StreamExpressionParser.parse("innerJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\"),"
+          + "on=\"join1_i=join1_i, join2_s=join2_s\")");
+      stream = new InnerJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 8);
+      assertOrder(tuples, 7, 3, 4, 5, 1, 1, 15, 15);
+
+      // Results in both searches, no join matches
+      expression = StreamExpressionParser.parse("innerJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\"),"
+          + "on=\"ident_s=right.ident_s\")");
+      stream = new InnerJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 0);
+
+      // Differing field names
+      expression = StreamExpressionParser.parse("innerJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\", aliases=\"join3_i=aliasesField\"),"
+          + "on=\"join1_i=aliasesField, join2_s=join2_s\")");
+      stream = new InnerJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+
+      assert (tuples.size() == 8);
+      assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7);
+    } finally {
+      solrClientCache.close();
+    }
+  }
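+
+  // innerJoin() expects both sides sorted by the join keys and emits the
+  // cross product of each matching key group. Left docs 1 and 15 both carry
+  // join1_i=0/join2_s=a, matching right docs 8 and 9, which is why ids
+  // 1, 1, 15, 15 open the expected order: 2 left x 2 right = 4 joined tuples.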
+
+  @Test
+  public void testLeftOuterJoinStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "1", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9
+        .add(id, "15", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9
+        .add(id, "2", "side_s", "left", "join1_i", "0", "join2_s", "b", "ident_s", "left_2")
+        .add(id, "3", "side_s", "left", "join1_i", "1", "join2_s", "a", "ident_s", "left_3") // 10
+        .add(id, "4", "side_s", "left", "join1_i", "1", "join2_s", "b", "ident_s", "left_4") // 11
+        .add(id, "5", "side_s", "left", "join1_i", "1", "join2_s", "c", "ident_s", "left_5") // 12
+        .add(id, "6", "side_s", "left", "join1_i", "2", "join2_s", "d", "ident_s", "left_6")
+        .add(id, "7", "side_s", "left", "join1_i", "3", "join2_s", "e", "ident_s", "left_7") // 14
+
+        .add(id, "8", "side_s", "right", "join1_i", "0", "join2_s", "a", "ident_s", "right_1", "join3_i", "0") // 1,15
+        .add(id, "9", "side_s", "right", "join1_i", "0", "join2_s", "a", "ident_s", "right_2", "join3_i", "0") // 1,15
+        .add(id, "10", "side_s", "right", "join1_i", "1", "join2_s", "a", "ident_s", "right_3", "join3_i", "1") // 3
+        .add(id, "11", "side_s", "right", "join1_i", "1", "join2_s", "b", "ident_s", "right_4", "join3_i", "1") // 4
+        .add(id, "12", "side_s", "right", "join1_i", "1", "join2_s", "c", "ident_s", "right_5", "join3_i", "1") // 5
+        .add(id, "13", "side_s", "right", "join1_i", "2", "join2_s", "dad", "ident_s", "right_6", "join3_i", "2")
+        .add(id, "14", "side_s", "right", "join1_i", "3", "join2_s", "e", "ident_s", "right_7", "join3_i", "3") // 7
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("leftOuterJoin", LeftOuterJoinStream.class);
+    
+    // Basic test
+    try {
+      expression = StreamExpressionParser.parse("leftOuterJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\"),"
+          + "on=\"join1_i=join1_i, join2_s=join2_s\")");
+      stream = new LeftOuterJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 10);
+      assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7);
+
+      // Basic desc
+      expression = StreamExpressionParser.parse("leftOuterJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\"),"
+          + "on=\"join1_i=join1_i, join2_s=join2_s\")");
+      stream = new LeftOuterJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 10);
+      assertOrder(tuples, 7, 6, 3, 4, 5, 1, 1, 15, 15, 2);
+
+      // Results in both searches, no join matches
+      expression = StreamExpressionParser.parse("leftOuterJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\"),"
+          + "on=\"ident_s=right.ident_s\")");
+      stream = new LeftOuterJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 8);
+      assertOrder(tuples, 1, 15, 2, 3, 4, 5, 6, 7);
+
+      // Differing field names
+      expression = StreamExpressionParser.parse("leftOuterJoin("
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\"),"
+          + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"join3_i,join2_s,ident_s\", sort=\"join3_i asc, join2_s asc\", aliases=\"join3_i=aliasesField\"),"
+          + "on=\"join1_i=aliasesField, join2_s=join2_s\")");
+      stream = new LeftOuterJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 10);
+      assertOrder(tuples, 1, 1, 15, 15, 2, 3, 4, 5, 6, 7);
+    } finally {
+      solrClientCache.close();
+    }
+  }
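+
+  // leftOuterJoin() differs from innerJoin() only in that left tuples with
+  // no right-side match (docs 2 and 6 here) still flow through unjoined,
+  // which is why the expected count grows from innerJoin's 8 to 10, and why
+  // the no-match variant still returns all 8 left tuples.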
+
+  @Test
+  public void testHashJoinStream() throws Exception {
+
+    new UpdateRequest()
+        .add(id, "1", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9
+        .add(id, "15", "side_s", "left", "join1_i", "0", "join2_s", "a", "ident_s", "left_1") // 8, 9
+        .add(id, "2", "side_s", "left", "join1_i", "0", "join2_s", "b", "ident_s", "left_2")
+        .add(id, "3", "side_s", "left", "join1_i", "1", "join2_s", "a", "ident_s", "left_3") // 10
+        .add(id, "4", "side_s", "left", "join1_i", "1", "join2_s", "b", "ident_s", "left_4") // 11
+        .add(id, "5", "side_s", "left", "join1_i", "1", "join2_s", "c", "ident_s", "left_5") // 12
+        .add(id, "6", "side_s", "left", "join1_i", "2", "join2_s", "d", "ident_s", "left_6")
+        .add(id, "7", "side_s", "left", "join1_i", "3", "join2_s", "e", "ident_s", "left_7") // 14
+
+        .add(id, "8", "side_s", "right", "join1_i", "0", "join2_s", "a", "ident_s", "right_1", "join3_i", "0") // 1,15
+        .add(id, "9", "side_s", "right", "join1_i", "0", "join2_s", "a", "ident_s", "right_2", "join3_i", "0") // 1,15
+        .add(id, "10", "side_s", "right", "join1_i", "1", "join2_s", "a", "ident_s", "right_3", "join3_i", "1") // 3
+        .add(id, "11", "side_s", "right", "join1_i", "1", "join2_s", "b", "ident_s", "right_4", "join3_i", "1") // 4
+        .add(id, "12", "side_s", "right", "join1_i", "1", "join2_s", "c", "ident_s", "right_5", "join3_i", "1") // 5
+        .add(id, "13", "side_s", "right", "join1_i", "2", "join2_s", "dad", "ident_s", "right_6", "join3_i", "2")
+        .add(id, "14", "side_s", "right", "join1_i", "3", "join2_s", "e", "ident_s", "right_7", "join3_i", "3") // 7
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    StreamExpression expression;
+    TupleStream stream;
+    List<Tuple> tuples;
+    StreamContext streamContext = new StreamContext();
+    SolrClientCache solrClientCache = new SolrClientCache();
+    streamContext.setSolrClientCache(solrClientCache);
+    
+    StreamFactory factory = new StreamFactory()
+      .withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
+      .withFunctionName("search", CloudSolrStream.class)
+      .withFunctionName("hashJoin", HashJoinStream.class);
+    try {
+      // Basic test
+      expression = StreamExpressionParser.parse("hashJoin("
+          + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc, id asc\"),"
+          + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i asc, join2_s asc\"),"
+          + "on=\"join1_i, join2_s\")");
+      stream = new HashJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples(stream);
+      assert (tuples.size() == 8);
+      assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7);
+
+      // Basic desc
+      expression = StreamExpressionParser.parse("hashJoin("
+          + "search(collection1, q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\"),"
+          + "hashed=search(collection1, q=\"side_s:right\", fl=\"join1_i,join2_s,ident_s\", sort=\"join1_i desc, join2_s asc\"),"
+          + "on=\"join1_i, join2_s\")");
+      stream = new HashJoinStream(expression, factory);
+      stream.setStreamContext(streamContext);
+      tuples = getTuples

<TRUNCATED>

[16/50] lucene-solr:jira/solr-12181: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr

Posted by ab...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/27f47726
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/27f47726
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/27f47726

Branch: refs/heads/jira/solr-12181
Commit: 27f477267807d4d1e150246c8fb1e961d10b8d69
Parents: 9b03f8c 8b3fc53
Author: Karl Wright <Da...@gmail.com>
Authored: Thu Apr 5 12:07:24 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Thu Apr 5 12:07:24 2018 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/highlighting.adoc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------



[06/50] lucene-solr:jira/solr-12181: SOLR-11929: UpdateLog metrics are not initialized on core reload

Posted by ab...@apache.org.
SOLR-11929: UpdateLog metrics are not initialized on core reload


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8e276b90
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8e276b90
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8e276b90

Branch: refs/heads/jira/solr-12181
Commit: 8e276b90f520df771d8a1e60408fe112c40ceea4
Parents: 2bbd193
Author: Steve Rowe <sa...@apache.org>
Authored: Wed Apr 4 11:18:10 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Wed Apr 4 11:19:21 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                         | 4 +++-
 solr/core/src/java/org/apache/solr/update/UpdateLog.java | 7 +++----
 2 files changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e276b90/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 091ce94..6a41c8d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -107,7 +107,9 @@ Bug Fixes
   (Susheel Kumar, Aibao Luo, Nikkolay Martinov via Erick Erickson)
 
 * SOLR-12172: Fixed race condition that could cause an invalid set of collection properties to be kept in
-  memory when multiple collection property changes are done in a short period of time. (Tomás Fernández Löbbe) 
+  memory when multiple collection property changes are done in a short period of time. (Tomás Fernández Löbbe)
+  
+* SOLR-11929: UpdateLog metrics are not initialized on core reload.  (ab, Steve Rowe) 
 
 Optimizations
 ----------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e276b90/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 4ca4bf2..fbdf616 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -347,13 +347,12 @@ public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
     this.uhandler = uhandler;
 
     if (dataDir.equals(lastDataDir)) {
+      versionInfo.reload();
+      core.getCoreMetricManager().registerMetricProducer(SolrInfoBean.Category.TLOG.toString(), this);
+
       if (debug) {
         log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", next id=" + id, " this is a reopen... nothing else to do.");
       }
-
-      versionInfo.reload();
-
-      // on a normal reopen, we currently shouldn't have to do anything
       return;
     }
     lastDataDir = dataDir;
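
The change above matters because the early-return reopen branch previously
refreshed only the version info: a reloaded core gets a fresh metric
registry, but the UpdateLog gauges were never re-registered against it, so
the TLOG metrics stayed uninitialized after a core reload. A minimal sketch
of the reopen path after the fix (method context elided; names follow the
diff):

    if (dataDir.equals(lastDataDir)) {
      versionInfo.reload();
      // re-attach the TLOG gauges to the reloaded core's metric registry
      core.getCoreMetricManager().registerMetricProducer(
          SolrInfoBean.Category.TLOG.toString(), this);
      return;   // a normal reopen needs nothing else
    }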


[25/50] lucene-solr:jira/solr-12181: SOLR-12183: Refactor Streaming Expression test cases

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80375acb/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
new file mode 100644
index 0000000..41116e2
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
@@ -0,0 +1,4145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.io.SolrClientCache;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+@LuceneTestCase.SuppressCodecs({"Lucene3x", "Lucene40","Lucene41","Lucene42","Lucene45"})
+public class MathExpressionTest extends SolrCloudTestCase {
+
+  private static final String COLLECTIONORALIAS = "collection1";
+  private static final int TIMEOUT = DEFAULT_TIMEOUT;
+  private static final String id = "id";
+
+  private static boolean useAlias;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(4)
+        .addConfig("conf", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
+        .addConfig("ml", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("ml").resolve("conf"))
+        .configure();
+
+    String collection;
+    useAlias = random().nextBoolean();
+    if (useAlias) {
+      collection = COLLECTIONORALIAS + "_collection";
+    } else {
+      collection = COLLECTIONORALIAS;
+    }
+
+    CollectionAdminRequest.createCollection(collection, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
+    if (useAlias) {
+      CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
+    }
+  }
+
+  @Before
+  public void cleanIndex() throws Exception {
+    new UpdateRequest()
+        .deleteByQuery("*:*")
+        .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+  }
+
+  @Test
+  public void testAnalyzeEvaluator() throws Exception {
+
+    UpdateRequest updateRequest = new UpdateRequest();
+    updateRequest.add(id, "1", "test_t", "l b c d c");
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+
+    SolrClientCache cache = new SolrClientCache();
+    try {
+
+      String expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)";
+      ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+      String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+
+      SolrStream solrStream = new SolrStream(url, paramsLoc);
+
+      StreamContext context = new StreamContext();
+      solrStream.setStreamContext(context);
+      List<Tuple> tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 5);
+
+      Tuple t = tuples.get(0);
+      assertTrue(t.getString("test_t").equals("l"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(1);
+      assertTrue(t.getString("test_t").equals("b"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(2);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(3);
+      assertTrue(t.getString("test_t").equals("d"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(4);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      expr = "analyze(\"hello world\", test_t)";
+      paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+
+      solrStream = new SolrStream(url, paramsLoc);
+      context = new StreamContext();
+      solrStream.setStreamContext(context);
+      tuples = getTuples(solrStream);
+      assertEquals(tuples.size(), 1);
+      List terms = (List)tuples.get(0).get("return-value");
+      assertTrue(terms.get(0).equals("hello"));
+      assertTrue(terms.get(1).equals("world"));
+
+      //Try with single param
+      expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t) as test_t)";
+      paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+
+      solrStream = new SolrStream(url, paramsLoc);
+
+      context = new StreamContext();
+      solrStream.setStreamContext(context);
+      tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 5);
+
+      t = tuples.get(0);
+      assertTrue(t.getString("test_t").equals("l"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(1);
+      assertTrue(t.getString("test_t").equals("b"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(2);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(3);
+      assertTrue(t.getString("test_t").equals("d"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(4);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+      //Try with null in the test_t field
+      expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)";
+      paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+
+      solrStream = new SolrStream(url, paramsLoc);
+
+      context = new StreamContext();
+      solrStream.setStreamContext(context);
+      tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 1);
+
+      //Test annotating tuple
+      expr = "select(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test1_t)";
+      paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+
+      solrStream = new SolrStream(url, paramsLoc);
+
+      context = new StreamContext();
+      solrStream.setStreamContext(context);
+      tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 1);
+      List l = (List)tuples.get(0).get("test1_t");
+      assertTrue(l.get(0).equals("l"));
+      assertTrue(l.get(1).equals("b"));
+      assertTrue(l.get(2).equals("c"));
+      assertTrue(l.get(3).equals("d"));
+      assertTrue(l.get(4).equals("c"));
+    } finally {
+      cache.close();
+    }
+  }
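+
+  // analyze() runs a field's text analysis over raw input and returns the
+  // token list, so "l b c d c" yields five tokens under this configset's
+  // test_t analysis. Combined with cartesianProduct() each token becomes its
+  // own tuple; combined with select() the whole token list is attached as a
+  // single field:
+  //
+  //   cartesianProduct(search(...), analyze(test_t, test_t) as test_t)
+  //   select(search(...), analyze(test_t, test_t) as test1_t)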
+
+  @Test
+  public void testHist() throws Exception {
+    String expr = "hist(sequence(100, 0, 1), 10)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", expr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Map> hist = (List<Map>)tuples.get(0).get("return-value");
+    assertTrue(hist.size() == 10);
+    for(int i=0; i<hist.size(); i++) {
+      Map stats = hist.get(i);
+      assertTrue(((Number)stats.get("N")).intValue() == 10);
+      assertTrue(((Number)stats.get("min")).intValue() == 10*i);
+      assertTrue(((Number)stats.get("var")).doubleValue() == 9.166666666666666);
+      assertTrue(((Number)stats.get("stdev")).doubleValue() == 3.0276503540974917);
+    }
+
+    expr = "hist(sequence(100, 0, 1), 5)";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", expr);
+    paramsLoc.set("qt", "/stream");
+
+    solrStream = new SolrStream(url, paramsLoc);
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    hist = (List<Map>)tuples.get(0).get("return-value");
+    assertTrue(hist.size() == 5);
+    for(int i=0; i<hist.size(); i++) {
+      Map stats = hist.get(i);
+      assertTrue(((Number)stats.get("N")).intValue() == 20);
+      assertTrue(((Number)stats.get("min")).intValue() == 20*i);
+      assertTrue(((Number)stats.get("var")).doubleValue() == 35);
+      assertTrue(((Number)stats.get("stdev")).doubleValue() == 5.916079783099616);
+    }
+  }
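+
+  // The asserted values follow from hist() bucketing sequence(100, 0, 1),
+  // i.e. the integers 0..99, into equal-width bins and reporting sample
+  // statistics per bin. With 10 bins each bin holds 10 consecutive ints, so
+  //
+  //   var   = sum((k - 4.5)^2, k=0..9) / (10 - 1) = 82.5 / 9 = 9.1666...
+  //   stdev = sqrt(9.1666...) = 3.0276...
+  //
+  // and with 5 bins of 20 ints: var = 665 / 19 = 35, stdev = sqrt(35) = 5.9160...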
+
+  @Test
+  public void testCumulativeProbability() throws Exception {
+    String expr = "cumulativeProbability(normalDistribution(500, 40), 500)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", expr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Number number = (Number)tuples.get(0).get("return-value");
+    assertTrue(number.doubleValue() == .5D);
+  }
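+
+  // cumulativeProbability(d, x) evaluates the CDF of distribution d at x.
+  // For any normal distribution the CDF at the mean is exactly 0.5 by
+  // symmetry, which is what the assertion checks for N(mean=500, sd=40).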
+
+  private String getDateString(String year, String month, String day) {
+    return year+"-"+month+"-"+day+"T00:00:00Z";
+  }
+
+  @Test
+  public void testCorrelationStream() throws Exception {
+    UpdateRequest updateRequest = new UpdateRequest();
+
+    int i=0;
+    while(i<50) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2016", "5", "1"), "price_f", "400.00");
+    }
+
+    while(i<100) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2015", "5", "1"), "price_f", "300.0");
+    }
+
+    while(i<150) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2014", "5", "1"), "price_f", "500.0");
+    }
+
+    while(i<250) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2013", "5", "1"), "price_f", "100.00");
+    }
+
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
+        "end=\"2016-12-01T01:00:00.000Z\", " +
+        "gap=\"+1YEAR\", " +
+        "field=\"test_dt\", " +
+        "count(*), sum(price_f), max(price_f), min(price_f))";
+
+    String cexpr = "let(a="+expr+", b=select("+expr+",mult(-1, count(*)) as nvalue), c=col(a, count(*)), d=col(b, nvalue), " +
+                       "tuple(corr=corr(c,d), scorr=corr(array(500, 50, 50, 50),d, type=spearmans), kcorr=corr(array(500, 50, 50, 50),d, type=kendalls), d=d))";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    assertTrue(tuples.get(0).getDouble("corr").equals(-1.0D));
+    assertTrue(tuples.get(0).getDouble("scorr").equals(-1.0D));
+    assertTrue(tuples.get(0).getDouble("kcorr").equals(-1.0D));
+  }
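+
+  // d is defined as -1 * count(*), so it is a perfectly decreasing function
+  // of the yearly counts and every correlation flavor returns -1: Pearson
+  // (the corr() default) because d = -c exactly, and Spearman / Kendall
+  // because the rankings of the two arrays are exact mirror images.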
+
+  @Test
+  public void testCovariance() throws Exception {
+    UpdateRequest updateRequest = new UpdateRequest();
+
+    int i=0;
+    while(i<50) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2016", "5", "1"), "price_f", "400.00");
+    }
+
+    while(i<100) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2015", "5", "1"), "price_f", "300.0");
+    }
+
+    while(i<150) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2014", "5", "1"), "price_f", "500.0");
+    }
+
+    while(i<250) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2013", "5", "1"), "price_f", "100.00");
+    }
+
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
+        "end=\"2016-12-01T01:00:00.000Z\", " +
+        "gap=\"+1YEAR\", " +
+        "field=\"test_dt\", " +
+        "count(*), sum(price_f), max(price_f), min(price_f))";
+
+    String cexpr = "let(a="+expr+", b=select("+expr+",mult(-1, count(*)) as nvalue), c=col(a, count(*)), d=col(b, nvalue), tuple(colc=c, cold=d, cov=cov(c,d)))";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    assertTrue(tuples.get(0).getDouble("cov").equals(-625.0D));
+  }
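+
+  // Worked check of the -625 assertion: the yearly counts are
+  // c = [100, 50, 50, 50] and d = -c, so cov(c, d) = -var(c). With mean
+  // 62.5 the squared deviations sum to 1875, and 1875 / (4 - 1) = 625,
+  // giving cov = -625.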
+
+  @Test
+  public void testDistance() throws Exception {
+    String cexpr = "let(echo=true, " +
+                       "a=array(1,2,3,4)," +
+                       "b=array(2,3,4,5), " +
+                       "c=array(3,4,5,6), " +
+                       "d=distance(a, b), " +
+                       "e=distance(a, c)," +
+                       "f=distance(b, c)," +
+                       "g=transpose(matrix(a, b, c))," +
+                       "h=distance(g)," +
+                       "i=distance(a, b, type=manhattan), " +
+                       "j=distance(a, c, type=manhattan)," +
+                       "k=distance(b, c, type=manhattan)," +
+                       "l=transpose(matrix(a, b, c))," +
+                       "m=distance(l, type=manhattan)," +
+                       "n=distance(a, b, type=canberra), " +
+                       "o=distance(a, c, type=canberra)," +
+                       "p=distance(b, c, type=canberra)," +
+                       "q=transpose(matrix(a, b, c))," +
+                       "r=distance(q, type=canberra)," +
+                       "s=distance(a, b, type=earthMovers), " +
+                       "t=distance(a, c, type=earthMovers)," +
+                       "u=distance(b, c, type=earthMovers)," +
+                       "w=transpose(matrix(a, b, c))," +
+                       "x=distance(w, type=earthMovers)," +
+                       ")";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Number d = (Number)tuples.get(0).get("d");
+    assertEquals(d.doubleValue(), 2.0, 0.0);
+    Number e = (Number)tuples.get(0).get("e");
+    assertEquals(e.doubleValue(), 4.0, 0.0);
+    Number f = (Number)tuples.get(0).get("f");
+    assertEquals(f.doubleValue(), 2.0, 0.0);
+
+    List<List<Number>> h = (List<List<Number>>)tuples.get(0).get("h");
+    assertEquals(h.size(), 3);
+    assertEquals(h.get(0).size(), 3);
+    List<Number> row0 = h.get(0);
+    assertEquals(row0.get(0).doubleValue(), 0, 0);
+    assertEquals(row0.get(1).doubleValue(), 2, 0);
+    assertEquals(row0.get(2).doubleValue(), 4, 0);
+
+    List<Number> row1 = h.get(1);
+    assertEquals(row1.get(0).doubleValue(), 2, 0);
+    assertEquals(row1.get(1).doubleValue(), 0, 0);
+    assertEquals(row1.get(2).doubleValue(), 2, 0);
+
+    List<Number> row2 = h.get(2);
+    assertEquals(row2.get(0).doubleValue(), 4, 0);
+    assertEquals(row2.get(1).doubleValue(), 2, 0);
+    assertEquals(row2.get(2).doubleValue(), 0, 0);
+
+    Number i = (Number)tuples.get(0).get("i");
+    assertEquals(i.doubleValue(), 4.0, 0.0);
+    Number j = (Number)tuples.get(0).get("j");
+    assertEquals(j.doubleValue(), 8.0, 0.0);
+    Number k = (Number)tuples.get(0).get("k");
+    assertEquals(k.doubleValue(), 4.0, 0.0);
+
+    List<List<Number>> m = (List<List<Number>>)tuples.get(0).get("m");
+    assertEquals(m.size(), 3);
+    assertEquals(m.get(0).size(), 3);
+    row0 = m.get(0);
+    assertEquals(row0.get(0).doubleValue(), 0, 0);
+    assertEquals(row0.get(1).doubleValue(), 4, 0);
+    assertEquals(row0.get(2).doubleValue(), 8, 0);
+
+    row1 = m.get(1);
+    assertEquals(row1.get(0).doubleValue(), 4, 0);
+    assertEquals(row1.get(1).doubleValue(), 0, 0);
+    assertEquals(row1.get(2).doubleValue(), 4, 0);
+
+    row2 = m.get(2);
+    assertEquals(row2.get(0).doubleValue(), 8, 0);
+    assertEquals(row2.get(1).doubleValue(), 4, 0);
+    assertEquals(row2.get(2).doubleValue(), 0, 0);
+
+    Number n = (Number)tuples.get(0).get("n");
+    assertEquals(n.doubleValue(), 0.787302, 0.0001);
+    Number o = (Number)tuples.get(0).get("o");
+    assertEquals(o.doubleValue(), 1.283333, 0.0001);
+    Number p = (Number)tuples.get(0).get("p");
+    assertEquals(p.doubleValue(), 0.544877, 0.0001);
+
+    List<List<Number>> r = (List<List<Number>>)tuples.get(0).get("r");
+    assertEquals(r.size(), 3);
+    assertEquals(r.get(0).size(), 3);
+    row0 = r.get(0);
+    assertEquals(row0.get(0).doubleValue(), 0, 0);
+    assertEquals(row0.get(1).doubleValue(), 0.787302, .0001);
+    assertEquals(row0.get(2).doubleValue(), 1.283333, .0001);
+
+    row1 = r.get(1);
+    assertEquals(row1.get(0).doubleValue(), 0.787302, .0001);
+    assertEquals(row1.get(1).doubleValue(), 0, 0);
+    assertEquals(row1.get(2).doubleValue(), 0.544877, .0001);
+
+    row2 = r.get(2);
+    assertEquals(row2.get(0).doubleValue(), 1.283333, .0001);
+    assertEquals(row2.get(1).doubleValue(), 0.544877, .0001);
+    assertEquals(row2.get(2).doubleValue(), 0, 0);
+
+
+    Number s = (Number)tuples.get(0).get("s");
+    assertEquals(s.doubleValue(), 10.0, 0);
+    Number t = (Number)tuples.get(0).get("t");
+    assertEquals(t.doubleValue(), 20.0, 0);
+    Number u = (Number)tuples.get(0).get("u");
+    assertEquals(u.doubleValue(), 10.0, 0);
+
+    List<List<Number>> x = (List<List<Number>>)tuples.get(0).get("x");
+    assertEquals(x.size(), 3);
+    assertEquals(x.get(0).size(), 3);
+    row0 = x.get(0);
+    assertEquals(row0.get(0).doubleValue(), 0, 0);
+    assertEquals(row0.get(1).doubleValue(), 10.0, 0);
+    assertEquals(row0.get(2).doubleValue(), 20, 0);
+
+    row1 = x.get(1);
+    assertEquals(row1.get(0).doubleValue(), 10, 0);
+    assertEquals(row1.get(1).doubleValue(), 0, 0);
+    assertEquals(row1.get(2).doubleValue(), 10, 0);
+
+    row2 = x.get(2);
+    assertEquals(row2.get(0).doubleValue(), 20, 0);
+    assertEquals(row2.get(1).doubleValue(), 10, 0);
+    assertEquals(row2.get(2).doubleValue(), 0, 0);
+  }
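+
+  // distance(x, y) defaults to Euclidean distance, and distance(matrix)
+  // returns the full pairwise distance matrix over the matrix columns.
+  // Since b shifts every component of a by exactly 1, the scalar checks
+  // reduce to
+  //
+  //   euclidean(a, b) = sqrt(4 * 1^2) = 2      manhattan(a, b) = 4 * 1 = 4
+  //
+  // and each 3x3 result matrix is symmetric with zeros on the diagonal.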
+
+  @Test
+  public void testReverse() throws Exception {
+    UpdateRequest updateRequest = new UpdateRequest();
+
+    int i=0;
+    while(i<50) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2016", "5", "1"), "price_f", "400.00");
+    }
+
+    while(i<100) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2015", "5", "1"), "price_f", "300.0");
+    }
+
+    while(i<150) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2014", "5", "1"), "price_f", "500.0");
+    }
+
+    while(i<250) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2013", "5", "1"), "price_f", "100.00");
+    }
+
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
+        "end=\"2016-12-01T01:00:00.000Z\", " +
+        "gap=\"+1YEAR\", " +
+        "field=\"test_dt\", " +
+        "count(*), sum(price_f), max(price_f), min(price_f))";
+
+    String cexpr = "let(a="+expr+", c=col(a, max(price_f)), tuple(reverse=rev(c)))";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> reverse = (List<Number>)tuples.get(0).get("reverse");
+    assertTrue(reverse.size() == 4);
+    assertTrue(reverse.get(0).doubleValue() == 400D);
+    assertTrue(reverse.get(1).doubleValue() == 300D);
+    assertTrue(reverse.get(2).doubleValue() == 500D);
+    assertTrue(reverse.get(3).doubleValue() == 100D);
+  }
+
+  @Test
+  public void testCopyOf() throws Exception {
+    UpdateRequest updateRequest = new UpdateRequest();
+
+    int i=0;
+    while(i<50) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2016", "5", "1"), "price_f", "400.00");
+    }
+
+    while(i<100) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2015", "5", "1"), "price_f", "300.0");
+    }
+
+    while(i<150) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2014", "5", "1"), "price_f", "500.0");
+    }
+
+    while(i<250) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2013", "5", "1"), "price_f", "100.00");
+    }
+
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
+        "end=\"2016-12-01T01:00:00.000Z\", " +
+        "gap=\"+1YEAR\", " +
+        "field=\"test_dt\", " +
+        "count(*), sum(price_f), max(price_f), min(price_f))";
+
+    String cexpr = "let(a="+expr+", c=col(a, max(price_f)), tuple(copy1=copyOf(c, 10), copy2=copyOf(c), copy3=copyOf(c, 2) ))";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> copy1 = (List<Number>)tuples.get(0).get("copy1");
+    assertTrue(copy1.size() == 4);
+    assertTrue(copy1.get(0).doubleValue() == 100D);
+    assertTrue(copy1.get(1).doubleValue() == 500D);
+    assertTrue(copy1.get(2).doubleValue() == 300D);
+    assertTrue(copy1.get(3).doubleValue() == 400D);
+
+    List<Number> copy2 = (List<Number>)tuples.get(0).get("copy2");
+    assertTrue(copy2.size() == 4);
+    assertTrue(copy2.get(0).doubleValue() == 100D);
+    assertTrue(copy2.get(1).doubleValue() == 500D);
+    assertTrue(copy2.get(2).doubleValue() == 300D);
+    assertTrue(copy2.get(3).doubleValue() == 400D);
+
+    List<Number> copy3 = (List<Number>)tuples.get(0).get("copy3");
+    assertTrue(copy3.size() == 2);
+    assertTrue(copy3.get(0).doubleValue() == 100D);
+    assertTrue(copy3.get(1).doubleValue() == 500D);
+  }
+
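+  // copyOfRange(c, from, to) follows the Arrays.copyOfRange convention: the start
+  // index is inclusive and the end index exclusive, so copyOfRange(c, 1, 3) returns
+  // the elements at indexes 1 and 2.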
+  @Test
+  public void testCopyOfRange() throws Exception {
+    UpdateRequest updateRequest = new UpdateRequest();
+
+    int i=0;
+    while(i<50) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2016", "5", "1"), "price_f", "400.00");
+    }
+
+    while(i<100) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2015", "5", "1"), "price_f", "300.0");
+    }
+
+    while(i<150) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2014", "5", "1"), "price_f", "500.0");
+    }
+
+    while(i<250) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2013", "5", "1"), "price_f", "100.00");
+    }
+
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
+        "end=\"2016-12-01T01:00:00.000Z\", " +
+        "gap=\"+1YEAR\", " +
+        "field=\"test_dt\", " +
+        "count(*), sum(price_f), max(price_f), min(price_f))";
+
+    String cexpr = "let(a="+expr+", c=col(a, max(price_f)), tuple(copy=copyOfRange(c, 1, 3), copy2=copyOfRange(c, 2, 4), l=length(c)))";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> copy1 = (List<Number>)tuples.get(0).get("copy");
+    assertTrue(copy1.size() == 2);
+    assertTrue(copy1.get(0).doubleValue() == 500D);
+    assertTrue(copy1.get(1).doubleValue() == 300D);
+
+    List<Number> copy2 = (List<Number>)tuples.get(0).get("copy2");
+    assertTrue(copy2.size() == 2);
+    assertTrue(copy2.get(0).doubleValue() == 300D);
+    assertTrue(copy2.get(1).doubleValue() == 400D);
+
+    long l = tuples.get(0).getLong("l");
+    assertTrue(l == 4);
+  }
+
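+  // percentile(array, p) estimates the pth percentile. The expected values are
+  // consistent with Commons Math's default estimation, pos = (p/100)(n+1): for the
+  // 20th percentile of 1..11, pos = 2.4, giving 2 + 0.4 * (3 - 2) = 2.4.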
+  @Test
+  public void testPercentile() throws Exception {
+    String cexpr = "percentile(array(1,2,3,4,5,6,7,8,9,10,11), 50)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Tuple tuple = tuples.get(0);
+    double p = tuple.getDouble("return-value");
+    assertEquals(p, 6, 0.0);
+
+    cexpr = "percentile(array(11,10,3,4,5,6,7,8,9,2,1), 50)";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    solrStream = new SolrStream(url, paramsLoc);
+
+    context = new StreamContext();
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    tuple = tuples.get(0);
+    p = tuple.getDouble("return-value");
+    assertEquals(p, 6, 0.0);
+
+    cexpr = "percentile(array(11,10,3,4,5,6,7,8,9,2,1), 20)";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    solrStream = new SolrStream(url, paramsLoc);
+
+    context = new StreamContext();
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    tuple = tuples.get(0);
+    p = tuple.getDouble("return-value");
+    assertEquals(p, 2.4, 0.001);
+  }
+
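+  // primes(n, start) returns the first n primes at or above start; with start=0
+  // that is 2, 3, 5, 7, 11, 13, 17, 19, 23, 29.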
+  @Test
+  public void testPrimes() throws Exception {
+    String cexpr = "primes(10, 0)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Tuple tuple = tuples.get(0);
+    List<Number> asort = (List<Number>)tuple.get("return-value");
+    assertEquals(asort.size(), 10);
+    assertEquals(asort.get(0).intValue(), 2);
+    assertEquals(asort.get(1).intValue(), 3);
+    assertEquals(asort.get(2).intValue(), 5);
+    assertEquals(asort.get(3).intValue(), 7);
+    assertEquals(asort.get(4).intValue(), 11);
+    assertEquals(asort.get(5).intValue(), 13);
+    assertEquals(asort.get(6).intValue(), 17);
+    assertEquals(asort.get(7).intValue(), 19);
+    assertEquals(asort.get(8).intValue(), 23);
+    assertEquals(asort.get(9).intValue(), 29);
+  }
+
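+  // binomialCoefficient(n, k) computes C(n, k); C(8, 3) = 8! / (3! * 5!) = 56.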
+  @Test
+  public void testBinomialCoefficient() throws Exception {
+    String cexpr = "binomialCoefficient(8,3)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Tuple tuple = tuples.get(0);
+    long binomialCoefficient = (long) tuple.get("return-value");
+    assertEquals(binomialCoefficient, 56);
+  }
+
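+  // asc() sorts a numeric array in ascending order.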
+  @Test
+  public void testAscend() throws Exception {
+    String cexpr = "asc(array(11.5, 12.3, 4, 3, 1, 0))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Tuple tuple = tuples.get(0);
+    List<Number> asort = (List<Number>)tuple.get("return-value");
+    assertEquals(asort.size(), 6);
+    assertEquals(asort.get(0).doubleValue(), 0, 0.0);
+    assertEquals(asort.get(1).doubleValue(), 1, 0.0);
+    assertEquals(asort.get(2).doubleValue(), 3, 0.0);
+    assertEquals(asort.get(3).doubleValue(), 4, 0.0);
+    assertEquals(asort.get(4).doubleValue(), 11.5, 0.0);
+    assertEquals(asort.get(5).doubleValue(), 12.3, 0.0);
+  }
+
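+  // rank() replaces each value with its ascending rank: for the column
+  // c = (100, 500, 300, 400) the ranks are (1, 4, 2, 3).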
+  @Test
+  public void testRankTransform() throws Exception {
+    UpdateRequest updateRequest = new UpdateRequest();
+
+    int i=0;
+    while(i<50) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2016", "5", "1"), "price_f", "400.00");
+    }
+
+    while(i<100) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2015", "5", "1"), "price_f", "300.0");
+    }
+
+    while(i<150) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2014", "5", "1"), "price_f", "500.0");
+    }
+
+    while(i<250) {
+      updateRequest.add(id, "id_"+(++i),"test_dt", getDateString("2013", "5", "1"), "price_f", "100.00");
+    }
+
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+    String expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
+        "end=\"2016-12-01T01:00:00.000Z\", " +
+        "gap=\"+1YEAR\", " +
+        "field=\"test_dt\", " +
+        "count(*), sum(price_f), max(price_f), min(price_f))";
+
+    String cexpr = "let(a="+expr+", c=col(a, max(price_f)), tuple(reverse=rev(c), ranked=rank(c)))";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> reverse = (List<Number>)tuples.get(0).get("reverse");
+    assertTrue(reverse.size() == 4);
+    assertTrue(reverse.get(0).doubleValue() == 400D);
+    assertTrue(reverse.get(1).doubleValue() == 300D);
+    assertTrue(reverse.get(2).doubleValue() == 500D);
+    assertTrue(reverse.get(3).doubleValue() == 100D);
+
+    List<Number> ranked = (List<Number>)tuples.get(0).get("ranked");
+    assertTrue(ranked.size() == 4);
+    assertTrue(ranked.get(0).doubleValue() == 1D);
+    assertTrue(ranked.get(1).doubleValue() == 4D);
+    assertTrue(ranked.get(2).doubleValue() == 2D);
+    assertTrue(ranked.get(3).doubleValue() == 3D);
+  }
+
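+  // array() builds a numeric list; per the assertions below, integer literals come
+  // back as whole numbers and decimal literals as doubles, in the original order.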
+  @Test
+  public void testArray() throws Exception {
+    String cexpr = "array(1, 2, 3, 300, 2, 500)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> out = (List<Number>)tuples.get(0).get("return-value");
+    assertTrue(out.size() == 6);
+    assertTrue(out.get(0).intValue() == 1);
+    assertTrue(out.get(1).intValue() == 2);
+    assertTrue(out.get(2).intValue() == 3);
+    assertTrue(out.get(3).intValue() == 300);
+    assertTrue(out.get(4).intValue() == 2);
+    assertTrue(out.get(5).intValue() == 500);
+
+    cexpr = "array(1.122, 2.222, 3.333, 300.1, 2.13, 500.23)";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    solrStream = new SolrStream(url, paramsLoc);
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    out = (List<Number>)tuples.get(0).get("return-value");
+    assertTrue(out.size() == 6);
+    assertTrue(out.get(0).doubleValue() == 1.122D);
+    assertTrue(out.get(1).doubleValue() == 2.222D);
+    assertTrue(out.get(2).doubleValue() == 3.333D);
+    assertTrue(out.get(3).doubleValue() == 300.1D);
+    assertTrue(out.get(4).doubleValue() == 2.13D);
+    assertTrue(out.get(5).doubleValue() == 500.23D);
+  }
+
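+  // Exercises the basic matrix accessors: rowAt/colAt are zero-indexed, indexOf
+  // searches the label array, and topFeatures(a, 1) returns, per row, the labels of
+  // the single largest value (col3 for row (1,2,3), col1 for row (6,5,4)).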
+  @Test
+  public void testMatrix() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=setColumnLabels(matrix(array(1, 2, 3), " +
+        "                                        rev(array(4,5,6)))," +
+        "                                        array(col1, col2, col3))," +
+        "               b=rowAt(a, 1)," +
+        "               c=colAt(a, 2)," +
+        "               d=getColumnLabels(a)," +
+        "               e=topFeatures(a, 1)," +
+        "               f=rowCount(a)," +
+        "               g=columnCount(a)," +
+        "               h=indexOf(d, \"col2\")," +
+        "               i=indexOf(d, col3))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("a");
+
+    List<Number> array1 = out.get(0);
+    assertEquals(array1.size(), 3);
+    assertEquals(array1.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(array1.get(1).doubleValue(), 2.0, 0.0);
+    assertEquals(array1.get(2).doubleValue(), 3.0, 0.0);
+
+    List<Number> array2 = out.get(1);
+    assertEquals(array2.size(), 3);
+    assertEquals(array2.get(0).doubleValue(), 6.0, 0.0);
+    assertEquals(array2.get(1).doubleValue(), 5.0, 0.0);
+    assertEquals(array2.get(2).doubleValue(), 4.0, 0.0);
+
+    List<Number> row = (List<Number>)tuples.get(0).get("b");
+
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 6.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 5.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 4.0, 0.0);
+
+    List<Number> col = (List<Number>)tuples.get(0).get("c");
+    assertEquals(col.size(), 2);
+    assertEquals(col.get(0).doubleValue(), 3.0, 0.0);
+    assertEquals(col.get(1).doubleValue(), 4.0, 0.0);
+
+    List<String> colLabels = (List<String>)tuples.get(0).get("d");
+    assertEquals(colLabels.size(), 3);
+    assertEquals(colLabels.get(0), "col1");
+    assertEquals(colLabels.get(1), "col2");
+    assertEquals(colLabels.get(2), "col3");
+
+    List<List<String>> features  = (List<List<String>>)tuples.get(0).get("e");
+    assertEquals(features.size(), 2);
+    assertEquals(features.get(0).size(), 1);
+    assertEquals(features.get(1).size(), 1);
+    assertEquals(features.get(0).get(0), "col3");
+    assertEquals(features.get(1).get(0), "col1");
+
+    assertTrue(tuples.get(0).getLong("f") == 2);
+    assertTrue(tuples.get(0).getLong("g") == 3);
+    assertTrue(tuples.get(0).getLong("h") == 1);
+    assertTrue(tuples.get(0).getLong("i") == 2);
+  }
+
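+  // The scalar ops take the scalar as the first argument and apply element-by-element
+  // to both matrices and arrays; e.g. scalarDivide(1.5, a) divides every element by
+  // 1.5, and grandSum(a) sums all elements (1.5 + 2.5 + ... + 6.5 = 24).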
+  @Test
+  public void testMatrixMath() throws Exception {
+    String cexpr = "let(echo=true, a=matrix(array(1.5, 2.5, 3.5), array(4.5,5.5,6.5)), " +
+                                  "b=grandSum(a), " +
+                                  "c=sumRows(a), " +
+                                  "d=sumColumns(a), " +
+                                  "e=scalarAdd(1, a)," +
+                                  "f=scalarSubtract(1, a)," +
+                                  "g=scalarMultiply(1.5, a)," +
+                                  "h=scalarDivide(1.5, a)," +
+                                  "i=scalarAdd(1.5, array(1.5, 2.5, 3.5))," +
+                                  "j=scalarSubtract(1.5, array(1.5, 2.5, 3.5))," +
+                                  "k=scalarMultiply(1.5, array(1.5, 2.5, 3.5))," +
+                                  "l=scalarDivide(1.5, array(1.5, 2.5, 3.5)))";
+
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+
+    double grandSum = tuples.get(0).getDouble("b");
+    assertEquals(grandSum, 24, 0.0);
+
+    List<Number> sumRows = (List<Number>)tuples.get(0).get("c");
+    assertEquals(sumRows.size(), 2);
+    assertEquals(sumRows.get(0).doubleValue(), 7.5, 0.0);
+    assertEquals(sumRows.get(1).doubleValue(), 16.5, 0.0);
+
+    List<Number> sumCols = (List<Number>)tuples.get(0).get("d");
+    assertEquals(sumCols.size(), 3);
+    assertEquals(sumCols.get(0).doubleValue(), 6.0, 0.0);
+    assertEquals(sumCols.get(1).doubleValue(), 8.0, 0.0);
+    assertEquals(sumCols.get(2).doubleValue(), 10, 0.0);
+
+    List<List<Number>> scalarAdd = (List<List<Number>>)tuples.get(0).get("e");
+    List<Number> row1 = scalarAdd.get(0);
+    assertEquals(row1.size(), 3);
+    assertEquals(row1.get(0).doubleValue(), 2.5, 0.0);
+    assertEquals(row1.get(1).doubleValue(), 3.5, 0.0);
+    assertEquals(row1.get(2).doubleValue(), 4.5, 0.0);
+
+    List<Number> row2 = scalarAdd.get(1);
+    assertEquals(row2.get(0).doubleValue(), 5.5, 0.0);
+    assertEquals(row2.get(1).doubleValue(), 6.5, 0.0);
+    assertEquals(row2.get(2).doubleValue(), 7.5, 0.0);
+
+    List<List<Number>> scalarSubtract = (List<List<Number>>)tuples.get(0).get("f");
+    row1 = scalarSubtract.get(0);
+    assertEquals(row1.size(), 3);
+    assertEquals(row1.get(0).doubleValue(), 0.5, 0.0);
+    assertEquals(row1.get(1).doubleValue(), 1.5, 0.0);
+    assertEquals(row1.get(2).doubleValue(), 2.5, 0.0);
+
+    row2 = scalarSubtract.get(1);
+    assertEquals(row2.get(0).doubleValue(), 3.5, 0.0);
+    assertEquals(row2.get(1).doubleValue(), 4.5, 0.0);
+    assertEquals(row2.get(2).doubleValue(), 5.5, 0.0);
+
+    List<List<Number>> scalarMultiply = (List<List<Number>>)tuples.get(0).get("g");
+    row1 = scalarMultiply.get(0);
+    assertEquals(row1.size(), 3);
+    assertEquals(row1.get(0).doubleValue(), 2.25, 0.0);
+    assertEquals(row1.get(1).doubleValue(), 3.75, 0.0);
+    assertEquals(row1.get(2).doubleValue(), 5.25, 0.0);
+
+    row2 = scalarMultiply.get(1);
+    assertEquals(row2.get(0).doubleValue(), 6.75, 0.0);
+    assertEquals(row2.get(1).doubleValue(), 8.25, 0.0);
+    assertEquals(row2.get(2).doubleValue(), 9.75, 0.0);
+
+    List<List<Number>> scalarDivide = (List<List<Number>>)tuples.get(0).get("h");
+    row1 = scalarDivide.get(0);
+    assertEquals(row1.size(), 3);
+    assertEquals(row1.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(row1.get(1).doubleValue(), 1.66666666666667, 0.001);
+    assertEquals(row1.get(2).doubleValue(), 2.33333333333333, 0.001);
+
+    row2 = scalarDivide.get(1);
+    assertEquals(row2.get(0).doubleValue(), 3, 0.0);
+    assertEquals(row2.get(1).doubleValue(), 3.66666666666667, 0.001);
+    assertEquals(row2.get(2).doubleValue(), 4.33333333333333, 0.001);
+
+    List<Number> rowA = (List<Number>)tuples.get(0).get("i");
+    assertEquals(rowA.size(), 3);
+    assertEquals(rowA.get(0).doubleValue(), 3.0, 0.0);
+    assertEquals(rowA.get(1).doubleValue(), 4.0, 0.0);
+    assertEquals(rowA.get(2).doubleValue(), 5.0, 0.0);
+
+    rowA = (List<Number>)tuples.get(0).get("j");
+    assertEquals(rowA.size(), 3);
+    assertEquals(rowA.get(0).doubleValue(), 0, 0.0);
+    assertEquals(rowA.get(1).doubleValue(), 1.0, 0.0);
+    assertEquals(rowA.get(2).doubleValue(), 2.0, 0.0);
+
+    rowA = (List<Number>)tuples.get(0).get("k");
+    assertEquals(rowA.size(), 3);
+    assertEquals(rowA.get(0).doubleValue(), 2.25, 0.0);
+    assertEquals(rowA.get(1).doubleValue(), 3.75, 0.0);
+    assertEquals(rowA.get(2).doubleValue(), 5.25, 0.0);
+
+    rowA = (List<Number>)tuples.get(0).get("l");
+    assertEquals(rowA.size(), 3);
+    assertEquals(rowA.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(rowA.get(1).doubleValue(), 1.66666666666667, 0.001);
+    assertEquals(rowA.get(2).doubleValue(), 2.33333333333333, 0.001);
+  }
+
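+  // transpose() swaps rows and columns, turning the 2x3 matrix into a 3x2 matrix.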
+  @Test
+  public void testTranspose() throws Exception {
+    String cexpr = "let(a=matrix(array(1,2,3), array(4,5,6)), b=transpose(a))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("b");
+    assertEquals(out.size(), 3);
+    List<Number> array1 = out.get(0);
+    assertEquals(array1.size(), 2);
+    assertEquals(array1.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(array1.get(1).doubleValue(), 4.0, 0.0);
+
+    List<Number> array2 = out.get(1);
+    assertEquals(array2.size(), 2);
+    assertEquals(array2.get(0).doubleValue(), 2.0, 0.0);
+    assertEquals(array2.get(1).doubleValue(), 5.0, 0.0);
+
+    List<Number> array3 = out.get(2);
+    assertEquals(array3.size(), 2);
+    assertEquals(array3.get(0).doubleValue(), 3.0, 0.0);
+    assertEquals(array3.get(1).doubleValue(), 6.0, 0.0);
+  }
+
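+  // unitize() scales each row (or a plain array) to unit L2 norm:
+  // (1,2,3) / sqrt(14) is approximately (0.2673, 0.5345, 0.8018).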
+  @Test
+  public void testUnitize() throws Exception {
+    String cexpr = "let(echo=true, a=unitize(matrix(array(1,2,3), array(4,5,6))), b=unitize(array(4,5,6)))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("a");
+    assertEquals(out.size(), 2);
+    List<Number> array1 = out.get(0);
+    assertEquals(array1.size(), 3);
+    assertEquals(array1.get(0).doubleValue(), 0.2672612419124244, 0.0);
+    assertEquals(array1.get(1).doubleValue(), 0.5345224838248488, 0.0);
+    assertEquals(array1.get(2).doubleValue(), 0.8017837257372732, 0.0);
+
+    List<Number> array2 = out.get(1);
+    assertEquals(array2.size(), 3);
+    assertEquals(array2.get(0).doubleValue(), 0.4558423058385518, 0.0);
+    assertEquals(array2.get(1).doubleValue(), 0.5698028822981898, 0.0);
+    assertEquals(array2.get(2).doubleValue(), 0.6837634587578276, 0.0);
+
+    List<Number> array3 = (List<Number>)tuples.get(0).get("b");
+    assertEquals(array3.size(), 3);
+    assertEquals(array3.get(0).doubleValue(), 0.4558423058385518, 0.0);
+    assertEquals(array3.get(1).doubleValue(), 0.5698028822981898, 0.0);
+    assertEquals(array3.get(2).doubleValue(), 0.6837634587578276, 0.0);
+  }
+
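+  // normalizeSum() scales values so each row (or array) sums to 1, or to the
+  // optional second argument (here 100).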
+  @Test
+  public void testNormalizeSum() throws Exception {
+    String cexpr = "let(echo=true, " +
+                       "a=normalizeSum(matrix(array(1,2,3), array(4,5,6))), " +
+                       "b=normalizeSum(array(1,2,3))," +
+                       "c=normalizeSum(array(1,2,3), 100))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("a");
+    assertEquals(out.size(), 2);
+    List<Number> array1 = out.get(0);
+    assertEquals(array1.size(), 3);
+    assertEquals(array1.get(0).doubleValue(), 0.16666666666666666, 0.0001);
+    assertEquals(array1.get(1).doubleValue(), 0.3333333333333333, 0.00001);
+    assertEquals(array1.get(2).doubleValue(), 0.5, 0.0001);
+
+    List<Number> array2 = out.get(1);
+    assertEquals(array2.size(), 3);
+    assertEquals(array2.get(0).doubleValue(), 0.26666666666666666, 0.0001);
+    assertEquals(array2.get(1).doubleValue(), 0.3333333333333333, 0.0001);
+    assertEquals(array2.get(2).doubleValue(), 0.4, 0.0001);
+
+    List<Number> array3 = (List<Number>)tuples.get(0).get("b");
+    assertEquals(array3.size(), 3);
+    assertEquals(array3.get(0).doubleValue(), 0.16666666666666666, 0.0001);
+    assertEquals(array3.get(1).doubleValue(), 0.3333333333333333, 0.0001);
+    assertEquals(array3.get(2).doubleValue(), 0.5, 0.0001);
+
+    List<Number> array4 = (List<Number>)tuples.get(0).get("c");
+    assertEquals(array4.size(), 3);
+    assertEquals(array4.get(0).doubleValue(), 16.666666666666666, 0.0001);
+    assertEquals(array4.get(1).doubleValue(), 33.33333333333333, 0.00001);
+    assertEquals(array4.get(2).doubleValue(), 50, 0.0001);
+  }
+
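+  // standardize() rescales each row (or array) to mean 0 and unit standard
+  // deviation; both (1,2,3) and (4,5,6) become (-1, 0, 1).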
+  @Test
+  public void testStandardize() throws Exception {
+    String cexpr = "let(echo=true, a=standardize(matrix(array(1,2,3), array(4,5,6))), b=standardize(array(4,5,6)))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("a");
+    assertEquals(out.size(), 2);
+    List<Number> array1 = out.get(0);
+    assertEquals(array1.size(), 3);
+    assertEquals(array1.get(0).doubleValue(), -1, 0.0);
+    assertEquals(array1.get(1).doubleValue(), 0, 0.0);
+    assertEquals(array1.get(2).doubleValue(), 1, 0.0);
+
+    List<Number> array2 = out.get(1);
+    assertEquals(array2.size(), 3);
+    assertEquals(array2.get(0).doubleValue(), -1, 0.0);
+    assertEquals(array2.get(1).doubleValue(), 0, 0.0);
+    assertEquals(array2.get(2).doubleValue(), 1, 0.0);
+
+    List<Number> array3 = (List<Number>)tuples.get(0).get("b");
+    assertEquals(array3.size(), 3);
+    assertEquals(array3.get(0).doubleValue(), -1, 0.0);
+    assertEquals(array3.get(1).doubleValue(), 0, 0.0);
+    assertEquals(array3.get(2).doubleValue(), 1, 0.0);
+  }
+
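+  // With both transition rows at (.5, .5), a 50000-step walk over the chain should
+  // visit each state about half the time, so both freqTable pct values should be
+  // close to .5.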
+  @Test
+  public void testMarkovChain() throws Exception {
+    String cexpr = "let(state0=array(.5,.5),\n" +
+                   "    state1=array(.5,.5),\n" +
+                   "    states=matrix(state0, state1),\n" +
+                   "    m=markovChain(states, 0),\n" +
+                   "    s=sample(m, 50000),\n" +
+                   "    f=freqTable(s))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Map<String, Number>> out = (List<Map<String, Number>>)tuples.get(0).get("f");
+    assertEquals(out.size(), 2);
+    Map<String, Number> bin0 = out.get(0);
+    double state0Pct = bin0.get("pct").doubleValue();
+    assertEquals(state0Pct, .5, .015);
+    Map<String, Number> bin1 = out.get(1);
+    double state1Pct = bin1.get("pct").doubleValue();
+    assertEquals(state1Pct, .5, .015);
+  }
+
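+  // addAll() concatenates the arrays in argument order, preserving numeric types.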
+  @Test
+  public void testAddAll() throws Exception {
+    String cexpr = "addAll(array(1, 2, 3), array(4.5, 5.5, 6.5), array(7,8,9))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> out = (List<Number>)tuples.get(0).get("return-value");
+    assertTrue(out.size() == 9);
+    assertTrue(out.get(0).intValue() == 1);
+    assertTrue(out.get(1).intValue() == 2);
+    assertTrue(out.get(2).intValue() == 3);
+    assertTrue(out.get(3).doubleValue() == 4.5D);
+    assertTrue(out.get(4).doubleValue() == 5.5D);
+    assertTrue(out.get(5).doubleValue() == 6.5D);
+    assertTrue(out.get(6).intValue() == 7);
+    assertTrue(out.get(7).intValue() == 8);
+    assertTrue(out.get(8).intValue() == 9);
+  }
+
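+  // probability(a, lo, hi) is P(lo <= X <= hi); for N(500, 20) and the range
+  // 520-530 that is Phi(1.5) - Phi(1.0), approximately 0.0918.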
+  @Test
+  public void testProbabilityRange() throws Exception {
+    String cexpr = "let(a=normalDistribution(500, 20), " +
+                       "b=probability(a, 520, 530))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Number prob = (Number)tuples.get(0).get("b");
+    assertEquals(prob.doubleValue(), 0.09184805266259899, 0.0);
+  }
+
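+  // ks() is a Kolmogorov-Smirnov test returning a map with a "p-value" key: a high
+  // p-value (> .05) means the sample is consistent with the distribution, while
+  // testing a sample drawn from normalDistribution(100, 6) against
+  // normalDistribution(10, 2) should yield a p-value of effectively 0.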
+  @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
+  public void testDistributions() throws Exception {
+    String cexpr = "let(a=normalDistribution(10, 2), " +
+                       "b=sample(a, 250), " +
+                       "c=normalDistribution(100, 6), " +
+                       "d=sample(c, 250), " +
+                       "u=uniformDistribution(1, 6),"+
+                       "t=sample(u, 250),"+
+                       "e=empiricalDistribution(d),"+
+                       "f=sample(e, 250),"+
+                       "tuple(sample=b, ks=ks(a,b), ks2=ks(a, d), ks3=ks(u, t)))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    try {
+      TupleStream solrStream = new SolrStream(url, paramsLoc);
+      StreamContext context = new StreamContext();
+      solrStream.setStreamContext(context);
+      List<Tuple> tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 1);
+      List<Number> out = (List<Number>) tuples.get(0).get("sample");
+
+      Map ks = (Map) tuples.get(0).get("ks");
+      Map ks2 = (Map) tuples.get(0).get("ks2");
+      Map ks3 = (Map) tuples.get(0).get("ks3");
+
+      assertTrue(out.size() == 250);
+      Number pvalue = (Number) ks.get("p-value");
+      Number pvalue2 = (Number) ks2.get("p-value");
+      Number pvalue3 = (Number) ks3.get("p-value");
+
+      assertTrue(pvalue.doubleValue() > .05D);
+      assertTrue(pvalue2.doubleValue() == 0);
+      assertTrue(pvalue3.doubleValue() > .05D);
+
+    } catch(AssertionError e) {
+
+      //This test can fail randomly due to the random sampling. So if it fails, try it again.
+      //If it fails twice in a row, we probably broke some code.
+
+      TupleStream solrStream = new SolrStream(url, paramsLoc);
+      StreamContext context = new StreamContext();
+      solrStream.setStreamContext(context);
+      List<Tuple> tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 1);
+      List<Number> out = (List<Number>) tuples.get(0).get("sample");
+
+      Map ks = (Map) tuples.get(0).get("ks");
+      Map ks2 = (Map) tuples.get(0).get("ks2");
+      Map ks3 = (Map) tuples.get(0).get("ks3");
+
+      assertTrue(out.size() == 250);
+      Number pvalue = (Number) ks.get("p-value");
+      Number pvalue2 = (Number) ks2.get("p-value");
+      Number pvalue3 = (Number) ks3.get("p-value");
+
+      assertTrue(pvalue.doubleValue() > .05D);
+      assertTrue(pvalue2.doubleValue() == 0);
+      assertTrue(pvalue3.doubleValue() > .05D);
+    }
+  }
+
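+  // sumDifference() sums the pairwise differences: (2-1) + (4-2) + ... + (12-6) = 21.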
+  @Test
+  public void testSumDifference() throws Exception {
+    String cexpr = "sumDifference(array(2,4,6,8,10,12),array(1,2,3,4,5,6))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    double sd = tuples.get(0).getDouble("return-value");
+    assertEquals(sd, 21.0D, 0.0);
+  }
+
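+  // meanDifference() averages the pairwise differences: 21 / 6 = 3.5.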
+  @Test
+  public void testMeanDifference() throws Exception {
+    String cexpr = "meanDifference(array(2,4,6,8,10,12),array(1,2,3,4,5,6))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    double sd = tuples.get(0).getDouble("return-value");
+    assertEquals(sd, 3.5, 0.0);
+  }
+
+  @Test
+  public void testTermVectors() throws Exception {
+    // Test termVectors with only documents and default termVector settings
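+    // Based on the assertions below, the TF-IDF weights appear to be
+    // sqrt(termFreq) * (1 + ln((numDocs + 1) / (docFreq + 1))): "hello" occurs in
+    // all 4 docs so it scores 1.0, single-occurrence terms score ~1.9163, and the
+    // doubled term "jim" scores sqrt(2) * 1.9163 ~= 2.7100.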
+    String cexpr = "let(echo=true," +
+                       "a=select(list(tuple(id=\"1\", text=\"hello world\"), " +
+                                     "tuple(id=\"2\", text=\"hello steve\"), " +
+                                     "tuple(id=\"3\", text=\"hello jim jim\"), " +
+                                     "tuple(id=\"4\", text=\"hello jack\")), id, analyze(text, test_t) as terms)," +
+                   "    b=termVectors(a, minDocFreq=0, maxDocFreq=1)," +
+        "               c=getRowLabels(b)," +
+        "               d=getColumnLabels(b)," +
+        "               e=getAttribute(b, docFreqs))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> termVectors  = (List<List<Number>>)tuples.get(0).get("b");
+
+    assertEquals(termVectors.size(), 4);
+    List<Number> termVector = termVectors.get(0);
+    assertEquals(termVector.size(), 5);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(4).doubleValue(), 1.916290731874155, 0.0);
+
+    termVector = termVectors.get(1);
+    assertEquals(termVector.size(), 5);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(4).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(2);
+    assertEquals(termVector.size(), 5);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 2.7100443424662948, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(4).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(3);
+    assertEquals(termVector.size(), 5);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(4).doubleValue(), 0.0, 0.0);
+
+    List<String> rowLabels  = (List<String>)tuples.get(0).get("c");
+    assertEquals(rowLabels.size(), 4);
+    assertEquals(rowLabels.get(0), "1");
+    assertEquals(rowLabels.get(1), "2");
+    assertEquals(rowLabels.get(2), "3");
+    assertEquals(rowLabels.get(3), "4");
+
+    List<String> columnLabels  = (List<String>)tuples.get(0).get("d");
+    assertEquals(columnLabels.size(), 5);
+    assertEquals(columnLabels.get(0), "hello");
+    assertEquals(columnLabels.get(1), "jack");
+    assertEquals(columnLabels.get(2), "jim");
+    assertEquals(columnLabels.get(3), "steve");
+    assertEquals(columnLabels.get(4), "world");
+
+    Map<String, Number> docFreqs  = (Map<String, Number>)tuples.get(0).get("e");
+
+    assertEquals(docFreqs.size(), 5);
+    assertEquals(docFreqs.get("hello").intValue(), 4);
+    assertEquals(docFreqs.get("jack").intValue(), 1);
+    assertEquals(docFreqs.get("jim").intValue(), 1);
+    assertEquals(docFreqs.get("steve").intValue(), 1);
+    assertEquals(docFreqs.get("world").intValue(), 1);
+
+    //Test minTermLength. This should drop off the term jim
+
+    cexpr = "let(echo=true," +
+                 "a=select(list(tuple(id=\"1\", text=\"hello world\"), " +
+                               "tuple(id=\"2\", text=\"hello steve\"), " +
+                               "tuple(id=\"3\", text=\"hello jim jim\"), " +
+                               "tuple(id=\"4\", text=\"hello jack\")), id, analyze(text, test_t) as terms)," +
+            "    b=termVectors(a, minTermLength=4, minDocFreq=0, maxDocFreq=1)," +
+            "    c=getRowLabels(b)," +
+            "    d=getColumnLabels(b)," +
+            "    e=getAttribute(b, docFreqs))";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    solrStream = new SolrStream(url, paramsLoc);
+    context = new StreamContext();
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    termVectors  = (List<List<Number>>)tuples.get(0).get("b");
+    assertEquals(termVectors.size(), 4);
+    termVector = termVectors.get(0);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 1.916290731874155, 0.0);
+
+    termVector = termVectors.get(1);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(2);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(3);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    rowLabels  = (List<String>)tuples.get(0).get("c");
+    assertEquals(rowLabels.size(), 4);
+    assertEquals(rowLabels.get(0), "1");
+    assertEquals(rowLabels.get(1), "2");
+    assertEquals(rowLabels.get(2), "3");
+    assertEquals(rowLabels.get(3), "4");
+
+    columnLabels  = (List<String>)tuples.get(0).get("d");
+    assertEquals(columnLabels.size(), 4);
+    assertEquals(columnLabels.get(0), "hello");
+    assertEquals(columnLabels.get(1), "jack");
+    assertEquals(columnLabels.get(2), "steve");
+    assertEquals(columnLabels.get(3), "world");
+
+    docFreqs  = (Map<String, Number>)tuples.get(0).get("e");
+
+    assertEquals(docFreqs.size(), 4);
+    assertEquals(docFreqs.get("hello").intValue(), 4);
+    assertEquals(docFreqs.get("jack").intValue(), 1);
+    assertEquals(docFreqs.get("steve").intValue(), 1);
+    assertEquals(docFreqs.get("world").intValue(), 1);
+
+    //Test exclude. This should drop off the term jim
+
+    cexpr = "let(echo=true," +
+        "        a=select(list(tuple(id=\"1\", text=\"hello world\"), " +
+        "                      tuple(id=\"2\", text=\"hello steve\"), " +
+        "                      tuple(id=\"3\", text=\"hello jim jim\"), " +
+        "                      tuple(id=\"4\", text=\"hello jack\")), id, analyze(text, test_t) as terms)," +
+        "        b=termVectors(a, exclude=jim, minDocFreq=0, maxDocFreq=1)," +
+        "        c=getRowLabels(b)," +
+        "        d=getColumnLabels(b)," +
+        "        e=getAttribute(b, docFreqs))";
+
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    solrStream = new SolrStream(url, paramsLoc);
+    context = new StreamContext();
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    termVectors  = (List<List<Number>>)tuples.get(0).get("b");
+    assertEquals(termVectors.size(), 4);
+    termVector = termVectors.get(0);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 1.916290731874155, 0.0);
+
+    termVector = termVectors.get(1);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(2);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    termVector = termVectors.get(3);
+    assertEquals(termVector.size(), 4);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(termVector.get(1).doubleValue(), 1.916290731874155, 0.0);
+    assertEquals(termVector.get(2).doubleValue(), 0.0, 0.0);
+    assertEquals(termVector.get(3).doubleValue(), 0.0, 0.0);
+
+    rowLabels  = (List<String>)tuples.get(0).get("c");
+    assertEquals(rowLabels.size(), 4);
+    assertEquals(rowLabels.get(0), "1");
+    assertEquals(rowLabels.get(1), "2");
+    assertEquals(rowLabels.get(2), "3");
+    assertEquals(rowLabels.get(3), "4");
+
+    columnLabels  = (List<String>)tuples.get(0).get("d");
+    assertEquals(columnLabels.size(), 4);
+    assertEquals(columnLabels.get(0), "hello");
+    assertEquals(columnLabels.get(1), "jack");
+    assertEquals(columnLabels.get(2), "steve");
+    assertEquals(columnLabels.get(3), "world");
+
+    docFreqs  = (Map<String, Number>)tuples.get(0).get("e");
+
+    assertEquals(docFreqs.size(), 4);
+    assertEquals(docFreqs.get("hello").intValue(), 4);
+    assertEquals(docFreqs.get("jack").intValue(), 1);
+    assertEquals(docFreqs.get("steve").intValue(), 1);
+    assertEquals(docFreqs.get("world").intValue(), 1);
+
+    //Test minDocFreq attribute at .5. This should eliminate all but the term hello
+
+    cexpr = "let(echo=true," +
+        "a=select(list(tuple(id=\"1\", text=\"hello world\"), " +
+        "tuple(id=\"2\", text=\"hello steve\"), " +
+        "tuple(id=\"3\", text=\"hello jim jim\"), " +
+        "tuple(id=\"4\", text=\"hello jack\")), id, analyze(text, test_t) as terms)," +
+        "    b=termVectors(a, minDocFreq=.5, maxDocFreq=1)," +
+        "    c=getRowLabels(b)," +
+        "    d=getColumnLabels(b)," +
+        "    e=getAttribute(b, docFreqs))";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    solrStream = new SolrStream(url, paramsLoc);
+    context = new StreamContext();
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    termVectors  = (List<List<Number>>)tuples.get(0).get("b");
+
+    assertEquals(termVectors.size(), 4);
+    termVector = termVectors.get(0);
+    assertEquals(termVector.size(), 1);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+
+    termVector = termVectors.get(1);
+    assertEquals(termVector.size(), 1);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+
+    termVector = termVectors.get(2);
+    assertEquals(termVector.size(), 1);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+
+    termVector = termVectors.get(3);
+    assertEquals(termVector.size(), 1);
+    assertEquals(termVector.get(0).doubleValue(), 1.0, 0.0);
+
+    rowLabels  = (List<String>)tuples.get(0).get("c");
+    assertEquals(rowLabels.size(), 4);
+    assertEquals(rowLabels.get(0), "1");
+    assertEquals(rowLabels.get(1), "2");
+    assertEquals(rowLabels.get(2), "3");
+    assertEquals(rowLabels.get(3), "4");
+
+    columnLabels  = (List<String>)tuples.get(0).get("d");
+    assertEquals(columnLabels.size(), 1);
+    assertEquals(columnLabels.get(0), "hello");
+
+    docFreqs  = (Map<String, Number>)tuples.get(0).get("e");
+
+    assertEquals(docFreqs.size(), 1);
+    assertEquals(docFreqs.get("hello").intValue(), 4);
+
+    //Test maxDocFreq attribute at 0. This should eliminate all terms
+
+    cexpr = "let(echo=true," +
+        "a=select(list(tuple(id=\"1\", text=\"hello world\"), " +
+        "tuple(id=\"2\", text=\"hello steve\"), " +
+        "tuple(id=\"3\", text=\"hello jim jim\"), " +
+        "tuple(id=\"4\", text=\"hello jack\")), id, analyze(text, test_t) as terms)," +
+        "    b=termVectors(a, maxDocFreq=0)," +
+        "    c=getRowLabels(b)," +
+        "    d=getColumnLabels(b)," +
+        "    e=getAttribute(b, docFreqs))";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    solrStream = new SolrStream(url, paramsLoc);
+    context = new StreamContext();
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    termVectors  = (List<List<Number>>)tuples.get(0).get("b");
+    assertEquals(termVectors.size(), 4);
+    assertEquals(termVectors.get(0).size(), 0);
+  }
+
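+  // ebeSubtract() subtracts element-by-element, for both arrays and matrices
+  // (matrices are handled row by row).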
+  @Test
+  public void testEbeSubtract() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=array(2, 4, 6, 8, 10, 12)," +
+        "               b=array(1, 2, 3, 4, 5, 6)," +
+        "               c=ebeSubtract(a,b)," +
+        "               d=array(10, 11, 12, 13, 14, 15)," +
+        "               e=array(100, 200, 300, 400, 500, 600)," +
+        "               f=matrix(a, b)," +
+        "               g=matrix(d, e)," +
+        "               h=ebeSubtract(f, g))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> out = (List<Number>)tuples.get(0).get("c");
+    assertEquals(out.size(), 6);
+    assertEquals(out.get(0).doubleValue(), 1.0, 0.0);
+    assertEquals(out.get(1).doubleValue(), 2.0, 0.0);
+    assertEquals(out.get(2).doubleValue(), 3.0, 0.0);
+    assertEquals(out.get(3).doubleValue(), 4.0, 0.0);
+    assertEquals(out.get(4).doubleValue(), 5.0, 0.0);
+    assertEquals(out.get(5).doubleValue(), 6.0, 0.0);
+
+    List<List<Number>> mout = (List<List<Number>>)tuples.get(0).get("h");
+    assertEquals(mout.size(), 2);
+    List<Number> row1 = mout.get(0);
+    assertEquals(row1.size(), 6);
+    assertEquals(row1.get(0).doubleValue(), -8.0, 0.0);
+    assertEquals(row1.get(1).doubleValue(), -7.0, 0.0);
+    assertEquals(row1.get(2).doubleValue(), -6.0, 0.0);
+    assertEquals(row1.get(3).doubleValue(), -5.0, 0.0);
+    assertEquals(row1.get(4).doubleValue(), -4.0, 0.0);
+    assertEquals(row1.get(5).doubleValue(), -3.0, 0.0);
+
+    List<Number> row2 = mout.get(1);
+    assertEquals(row2.size(), 6);
+    assertEquals(row2.get(0).doubleValue(), -99.0, 0.0);
+    assertEquals(row2.get(1).doubleValue(), -198.0, 0.0);
+    assertEquals(row2.get(2).doubleValue(), -297.0, 0.0);
+    assertEquals(row2.get(3).doubleValue(), -396.0, 0.0);
+    assertEquals(row2.get(4).doubleValue(), -495.0, 0.0);
+    assertEquals(row2.get(5).doubleValue(), -594.0, 0.0);
+  }
+
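+  // matrixMult() performs standard matrix multiplication, treating a plain array as
+  // a row vector: matrixMult(a, b) with a 1x3 row and a 3x1 column is the 1x1 dot
+  // product (32), while matrixMult(b, a) is the 3x3 outer product.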
+  @Test
+  public void testMatrixMult() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=array(1,2,3)," +
+        "               b=matrix(array(4), array(5), array(6))," +
+        "               c=matrixMult(a, b)," +
+        "               d=matrix(array(3, 4), array(10,11), array(30, 40))," +
+        "               e=matrixMult(a, d)," +
+        "               f=array(4,8,10)," +
+        "               g=matrix(a, f)," +
+        "               h=matrixMult(d, g)," +
+        "               i=matrixMult(b, a))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> matrix = (List<List<Number>>)tuples.get(0).get("c");
+    assertEquals(matrix.size(), 1);
+    List<Number> row = matrix.get(0);
+    assertEquals(row.size(), 1);
+    assertEquals(row.get(0).doubleValue(), 32.0, 0.0);
+
+    matrix = (List<List<Number>>)tuples.get(0).get("e");
+    assertEquals(matrix.size(), 1);
+    row = matrix.get(0);
+    assertEquals(row.size(), 2);
+    assertEquals(row.get(0).doubleValue(), 113.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 146.0, 0.0);
+
+    matrix = (List<List<Number>>)tuples.get(0).get("h");
+    assertEquals(matrix.size(), 3);
+    row = matrix.get(0);
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 19.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 38.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 49.0, 0.0);
+
+    row = matrix.get(1);
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 54.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 108.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 140.0, 0.0);
+
+    row = matrix.get(2);
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 190.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 380.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 490.0, 0.0);
+
+    matrix = (List<List<Number>>)tuples.get(0).get("i");
+
+    assertEquals(matrix.size(), 3);
+    row = matrix.get(0);
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 4.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 8.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 12.0, 0.0);
+
+    row = matrix.get(1);
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 5.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 10.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 15.0, 0.0);
+
+    row = matrix.get(2);
+    assertEquals(row.size(), 3);
+    assertEquals(row.get(0).doubleValue(), 6.0, 0.0);
+    assertEquals(row.get(1).doubleValue(), 12.0, 0.0);
+    assertEquals(row.get(2).doubleValue(), 18.0, 0.0);
+  }
+
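+  // With two well-separated groups, kmeans(e, 2) should put doc1/doc2 in one
+  // cluster and doc3/doc4 in the other, but cluster ordering is not deterministic,
+  // hence the branching assertions on the centroids.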
+  @Test
+  public void testKmeans() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=array(1,1,1,0,0,0)," +
+        "               b=array(1,1,1,0,0,0)," +
+        "               c=array(0,0,0,1,1,1)," +
+        "               d=array(0,0,0,1,1,1)," +
+        "               e=setRowLabels(matrix(a,b,c,d), " +
+        "                              array(doc1, doc2, doc3, doc4))," +
+        "               f=kmeans(e, 2)," +
+        "               g=getCluster(f, 0)," +
+        "               h=getCluster(f, 1)," +
+        "               i=getCentroids(f)," +
+        "               j=getRowLabels(g)," +
+        "               k=getRowLabels(h))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> cluster1 = (List<List<Number>>)tuples.get(0).get("g");
+    List<List<Number>> cluster2 = (List<List<Number>>)tuples.get(0).get("h");
+    List<List<Number>> centroids = (List<List<Number>>)tuples.get(0).get("i");
+    List<String> labels1 = (List<String>)tuples.get(0).get("j");
+    List<String> labels2 = (List<String>)tuples.get(0).get("k");
+
+    assertEquals(cluster1.size(), 2);
+    assertEquals(cluster2.size(), 2);
+    assertEquals(centroids.size(), 2);
+
+    //Assert that the docs are not in both clusters
+    assertTrue(!(labels1.contains("doc1") && labels2.contains("doc1")));
+    assertTrue(!(labels1.contains("doc2") && labels2.contains("doc2")));
+    assertTrue(!(labels1.contains("doc3") && labels2.contains("doc3")));
+    assertTrue(!(labels1.contains("doc4") && labels2.contains("doc4")));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels1
+    assertTrue((labels1.contains("doc1") && labels1.contains("doc2")) ||
+        ((labels1.contains("doc3") && labels1.contains("doc4"))));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels2
+    assertTrue((labels2.contains("doc1") && labels2.contains("doc2")) ||
+        ((labels2.contains("doc3") && labels2.contains("doc4"))));
+
+    if(labels1.contains("doc1")) {
+      assertEquals(centroids.get(0).get(0).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(1).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(2).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(3).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(4).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(5).doubleValue(), 0.0, 0.0);
+
+      assertEquals(centroids.get(1).get(0).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(1).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(2).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(3).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(4).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(5).doubleValue(), 1.0, 0.0);
+    } else {
+      assertEquals(centroids.get(0).get(0).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(1).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(2).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(3).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(4).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(5).doubleValue(), 1.0, 0.0);
+
+      assertEquals(centroids.get(1).get(0).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(1).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(2).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(3).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(4).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(5).doubleValue(), 0.0, 0.0);
+    }
+  }
+
+  @Test
+  public void testMultiKmeans() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=array(1,1,1,0,0,0)," +
+        "               b=array(1,1,1,0,0,0)," +
+        "               c=array(0,0,0,1,1,1)," +
+        "               d=array(0,0,0,1,1,1)," +
+        "               e=setRowLabels(matrix(a,b,c,d), " +
+        "                              array(doc1, doc2, doc3, doc4))," +
+        "               f=multiKmeans(e, 2, 5)," +
+        "               g=getCluster(f, 0)," +
+        "               h=getCluster(f, 1)," +
+        "               i=getCentroids(f)," +
+        "               j=getRowLabels(g)," +
+        "               k=getRowLabels(h))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> cluster1 = (List<List<Number>>)tuples.get(0).get("g");
+    List<List<Number>> cluster2 = (List<List<Number>>)tuples.get(0).get("h");
+    List<List<Number>> centroids = (List<List<Number>>)tuples.get(0).get("i");
+    List<String> labels1 = (List<String>)tuples.get(0).get("j");
+    List<String> labels2 = (List<String>)tuples.get(0).get("k");
+
+    assertEquals(cluster1.size(), 2);
+    assertEquals(cluster2.size(), 2);
+    assertEquals(centroids.size(), 2);
+
+    //Assert that the docs are not in both clusters
+    assertTrue(!(labels1.contains("doc1") && labels2.contains("doc1")));
+    assertTrue(!(labels1.contains("doc2") && labels2.contains("doc2")));
+    assertTrue(!(labels1.contains("doc3") && labels2.contains("doc3")));
+    assertTrue(!(labels1.contains("doc4") && labels2.contains("doc4")));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels1
+    assertTrue((labels1.contains("doc1") && labels1.contains("doc2")) ||
+        ((labels1.contains("doc3") && labels1.contains("doc4"))));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels2
+    assertTrue((labels2.contains("doc1") && labels2.contains("doc2")) ||
+        ((labels2.contains("doc3") && labels2.contains("doc4"))));
+
+    if(labels1.contains("doc1")) {
+      assertEquals(centroids.get(0).get(0).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(1).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(2).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(3).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(4).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(5).doubleValue(), 0.0, 0.0);
+
+      assertEquals(centroids.get(1).get(0).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(1).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(2).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(3).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(4).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(5).doubleValue(), 1.0, 0.0);
+    } else {
+      assertEquals(centroids.get(0).get(0).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(1).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(2).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(0).get(3).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(4).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(0).get(5).doubleValue(), 1.0, 0.0);
+
+      assertEquals(centroids.get(1).get(0).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(1).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(2).doubleValue(), 1.0, 0.0);
+      assertEquals(centroids.get(1).get(3).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(4).doubleValue(), 0.0, 0.0);
+      assertEquals(centroids.get(1).get(5).doubleValue(), 0.0, 0.0);
+    }
+  }
+
+  @Test
+  public void testFuzzyKmeans() throws Exception {
+    String cexpr = "let(echo=true," +
+        "               a=array(1,1,1,0,0,0)," +
+        "               b=array(1,1,1,0,0,0)," +
+        "               c=array(0,0,0,1,1,1)," +
+        "               d=array(0,0,0,1,1,1)," +
+        "               e=setRowLabels(matrix(a,b,c,d), " +
+        "                              array(doc1, doc2, doc3, doc4))," +
+        "               f=fuzzyKmeans(e, 2)," +
+        "               g=getCluster(f, 0)," +
+        "               h=getCluster(f, 1)," +
+        "               i=getCentroids(f)," +
+        "               j=getRowLabels(g)," +
+        "               k=getRowLabels(h)," +
+        "               l=getMembershipMatrix(f))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> cluster1 = (List<List<Number>>)tuples.get(0).get("g");
+    List<List<Number>> cluster2 = (List<List<Number>>)tuples.get(0).get("h");
+    List<List<Number>> centroids = (List<List<Number>>)tuples.get(0).get("i");
+    List<List<Number>> membership = (List<List<Number>>)tuples.get(0).get("l");
+
+    List<String> labels1 = (List<String>)tuples.get(0).get("j");
+    List<String> labels2 = (List<String>)tuples.get(0).get("k");
+
+    assertEquals(cluster1.size(), 2);
+    assertEquals(cluster2.size(), 2);
+    assertEquals(centroids.size(), 2);
+
+    //Assert that the docs are not in both clusters
+    assertTrue(!(labels1.contains("doc1") && labels2.contains("doc1")));
+    assertTrue(!(labels1.contains("doc2") && labels2.contains("doc2")));
+    assertTrue(!(labels1.contains("doc3") && labels2.contains("doc3")));
+    assertTrue(!(labels1.contains("doc4") && labels2.contains("doc4")));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels1
+    assertTrue((labels1.contains("doc1") && labels1.contains("doc2")) ||
+        ((labels1.contains("doc3") && labels1.contains("doc4"))));
+
+    //Assert that (doc1 and doc2) or (doc3 and doc4) are in labels2
+    assertTrue((labels2.contains("doc1") && labels2.contains("doc2")) ||
+        ((labels2.contains("doc3") && labels2.contains("doc4"))));
+
+
+    if(labels1.contains("doc1")) {
+      assertEquals(centroids.get(0).get(0).doubleValue(), 1.0, 0.001);
+      assertEquals(centroids.get(0).get(1).doubleValue(), 1.0, 0.001);
+      assertEquals(centroids.get(0).get(2).doubleValue(), 1.0, 0.001);
+      assertEquals(centroids.get(0).get(3).doubleValue(), 0.0, 0.001);
+      assertEquals(centroids.get(0).get(4).doubleValue(), 0.0, 0.001);
+      assertEquals(centroids.get(0).get(5).doubleValue(), 0.0, 0.001);
+
+      assertEquals(centroids.get(1).get(0).doubleValue(), 0.0, 0.001);
+      assertEquals(centroids.get(1).get(1).doubleValue(), 0.0, 0.001);
+      assertEquals(centroids.get(1).get(2).doubleValue(), 0.0,

<TRUNCATED>

[03/50] lucene-solr:jira/solr-12181: LUCENE-8233: Add support for soft deletes to IndexWriter

Posted by ab...@apache.org.
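
As a rough, non-authoritative sketch of how the soft-deletes pieces exercised below fit together (the "soft_delete" field name, the retention query, and the dir/doc variables are illustrative assumptions; the API names come from the diffs that follow):

    // Hedged sketch: configure a soft-deletes field plus a retention merge policy.
    IndexWriterConfig config = new IndexWriterConfig();
    config.setSoftDeletesField("soft_delete");
    config.setMergePolicy(new SoftDeletesRetentionMergePolicy(
        "soft_delete",
        () -> new MatchAllDocsQuery(),   // decides which soft-deleted docs to keep around
        config.getMergePolicy()));
    IndexWriter writer = new IndexWriter(dir, config);
    // An update marks the previous version as soft-deleted instead of hard-deleting it:
    writer.softUpdateDocument(new Term("id", "1"), doc,
        new NumericDocValuesField("soft_delete", 1));

Fully deleted segments whose documents still match the retention query are kept alive across merges, which is what the tests below verify.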
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
new file mode 100644
index 0000000..3f4f405
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
@@ -0,0 +1,312 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.search.DocValuesFieldExistsQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSoftDeletesRetentionMergePolicy extends LuceneTestCase {
+
+  public void testKeepFullyDeletedSegments() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new NumericDocValuesField("soft_delete", 1));
+    writer.addDocument(doc);
+    DirectoryReader reader = writer.getReader();
+    assertEquals(1, reader.leaves().size());
+    SegmentReader segmentReader = (SegmentReader) reader.leaves().get(0).reader();
+    MergePolicy policy = new SoftDeletesRetentionMergePolicy("soft_delete",
+        () -> new DocValuesFieldExistsQuery("keep_around"), NoMergePolicy.INSTANCE);
+    assertFalse(policy.keepFullyDeletedSegment(segmentReader));
+    reader.close();
+
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new NumericDocValuesField("keep_around", 1));
+    doc.add(new NumericDocValuesField("soft_delete", 1));
+    writer.addDocument(doc);
+
+    reader = writer.getReader();
+    assertEquals(2, reader.leaves().size());
+    segmentReader = (SegmentReader) reader.leaves().get(0).reader();
+    assertFalse(policy.keepFullyDeletedSegment(segmentReader));
+
+    segmentReader = (SegmentReader) reader.leaves().get(1).reader();
+    assertTrue(policy.keepFullyDeletedSegment(segmentReader));
+
+    IOUtils.close(reader, writer, dir);
+  }
+
+  public void testFieldBasedRetention() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    Instant now = Instant.now();
+    Instant time24HoursAgo = now.minus(Duration.ofDays(1));
+    String softDeletesField = "soft_delete";
+    Supplier<Query> docsOfLast24Hours = () -> LongPoint.newRangeQuery("creation_date", time24HoursAgo.toEpochMilli(), now.toEpochMilli());
+    indexWriterConfig.setMergePolicy(new SoftDeletesRetentionMergePolicy(softDeletesField, docsOfLast24Hours,
+        new LogDocMergePolicy()));
+    indexWriterConfig.setSoftDeletesField(softDeletesField);
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+    long time28HoursAgo = now.minus(Duration.ofHours(28)).toEpochMilli();
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    doc.add(new LongPoint("creation_date", time28HoursAgo));
+    writer.addDocument(doc);
+
+    writer.flush();
+    long time26HoursAgo = now.minus(Duration.ofHours(26)).toEpochMilli();
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "2", Field.Store.YES));
+    doc.add(new LongPoint("creation_date", time26HoursAgo));
+    writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+    if (random().nextBoolean()) {
+      writer.flush();
+    }
+    long time23HoursAgo = now.minus(Duration.ofHours(23)).toEpochMilli();
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "3", Field.Store.YES));
+    doc.add(new LongPoint("creation_date", time23HoursAgo));
+    writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+    if (random().nextBoolean()) {
+      writer.flush();
+    }
+    long time12HoursAgo = now.minus(Duration.ofHours(12)).toEpochMilli();
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "4", Field.Store.YES));
+    doc.add(new LongPoint("creation_date", time12HoursAgo));
+    writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+    if (random().nextBoolean()) {
+      writer.flush();
+    }
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "5", Field.Store.YES));
+    doc.add(new LongPoint("creation_date", now.toEpochMilli()));
+    writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+    if (random().nextBoolean()) {
+      writer.flush();
+    }
+    writer.forceMerge(1);
+    DirectoryReader reader = writer.getReader();
+    assertEquals(1, reader.numDocs());
+    assertEquals(3, reader.maxDoc());
+    Set<String> versions = new HashSet<>();
+    versions.add(reader.document(0, Collections.singleton("version")).get("version"));
+    versions.add(reader.document(1, Collections.singleton("version")).get("version"));
+    versions.add(reader.document(2, Collections.singleton("version")).get("version"));
+    assertTrue(versions.contains("5"));
+    assertTrue(versions.contains("4"));
+    assertTrue(versions.contains("3"));
+    IOUtils.close(reader, writer, dir);
+  }
+
+  public void testKeepAllDocsAcrossMerges() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    indexWriterConfig.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete",
+        () -> new MatchAllDocsQuery(),
+        indexWriterConfig.getMergePolicy()));
+    indexWriterConfig.setSoftDeletesField("soft_delete");
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "1"), doc,
+        new NumericDocValuesField("soft_delete", 1));
+
+    writer.commit();
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    writer.softUpdateDocument(new Term("id", "1"), doc,
+        new NumericDocValuesField("soft_delete", 1));
+
+    writer.commit();
+    doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new NumericDocValuesField("soft_delete", 1)); // already deleted
+    writer.softUpdateDocument(new Term("id", "1"), doc,
+        new NumericDocValuesField("soft_delete", 1));
+    writer.commit();
+    DirectoryReader reader = writer.getReader();
+    assertEquals(0, reader.numDocs());
+    assertEquals(3, reader.maxDoc());
+    assertEquals(0, writer.numDocs());
+    assertEquals(3, writer.maxDoc());
+    assertEquals(3, reader.leaves().size());
+    reader.close();
+    writer.forceMerge(1);
+    reader = writer.getReader();
+    assertEquals(0, reader.numDocs());
+    assertEquals(3, reader.maxDoc());
+    assertEquals(0, writer.numDocs());
+    assertEquals(3, writer.maxDoc());
+    assertEquals(1, reader.leaves().size());
+    IOUtils.close(reader, writer, dir);
+  }
+
+  /**
+   * tests soft deletes that carry over deleted documents on merge for history retention.
+   */
+  public void testSoftDeleteWithRetention() throws IOException, InterruptedException {
+    AtomicInteger seqIds = new AtomicInteger(0);
+    Directory dir = newDirectory();
+    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+    indexWriterConfig.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete",
+        () -> IntPoint.newRangeQuery("seq_id", seqIds.intValue() - 50, Integer.MAX_VALUE),
+        indexWriterConfig.getMergePolicy()));
+    indexWriterConfig.setSoftDeletesField("soft_delete");
+    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+    Thread[] threads = new Thread[2 + random().nextInt(3)];
+    CountDownLatch startLatch = new CountDownLatch(1);
+    CountDownLatch started = new CountDownLatch(threads.length);
+    boolean updateSeveralDocs = random().nextBoolean();
+    Set<String> ids = Collections.synchronizedSet(new HashSet<>());
+    for (int i = 0; i < threads.length; i++) {
+      threads[i] = new Thread(() -> {
+        try {
+          started.countDown();
+          startLatch.await();
+          for (int d = 0;  d < 100; d++) {
+            String id = String.valueOf(random().nextInt(10));
+            int seqId = seqIds.incrementAndGet();
+            if (updateSeveralDocs) {
+              Document doc = new Document();
+              doc.add(new StringField("id", id, Field.Store.YES));
+              doc.add(new IntPoint("seq_id", seqId));
+              writer.softUpdateDocuments(new Term("id", id), Arrays.asList(doc, doc),
+                  new NumericDocValuesField("soft_delete", 1));
+            } else {
+              Document doc = new Document();
+              doc.add(new StringField("id", id, Field.Store.YES));
+              doc.add(new IntPoint("seq_id", seqId));
+              writer.softUpdateDocument(new Term("id", id), doc,
+                  new NumericDocValuesField("soft_delete", 1));
+            }
+            ids.add(id);
+          }
+        } catch (IOException | InterruptedException e) {
+          throw new AssertionError(e);
+        }
+      });
+      threads[i].start();
+    }
+    started.await();
+    startLatch.countDown();
+
+    for (int i = 0; i < threads.length; i++) {
+      threads[i].join();
+    }
+    DirectoryReader reader = DirectoryReader.open(writer);
+    IndexSearcher searcher = new IndexSearcher(reader);
+    for (String id : ids) {
+      TopDocs topDocs = searcher.search(new TermQuery(new Term("id", id)), 10);
+      if (updateSeveralDocs) {
+        assertEquals(2, topDocs.totalHits);
+        assertEquals(Math.abs(topDocs.scoreDocs[0].doc - topDocs.scoreDocs[1].doc), 1);
+      } else {
+        assertEquals(1, topDocs.totalHits);
+      }
+    }
+    writer.addDocument(new Document()); // add a dummy doc to trigger a segment here
+    writer.flush();
+    writer.forceMerge(1);
+    DirectoryReader oldReader = reader;
+    reader = DirectoryReader.openIfChanged(reader, writer);
+    if (reader != null) {
+      oldReader.close();
+      assertNotSame(oldReader, reader);
+    } else {
+      reader = oldReader;
+    }
+    assertEquals(1, reader.leaves().size());
+    LeafReaderContext leafReaderContext = reader.leaves().get(0);
+    LeafReader leafReader = leafReaderContext.reader();
+    searcher = new IndexSearcher(new FilterLeafReader(leafReader) {
+      @Override
+      public CacheHelper getCoreCacheHelper() {
+        return leafReader.getCoreCacheHelper();
+      }
+
+      @Override
+      public CacheHelper getReaderCacheHelper() {
+        return leafReader.getReaderCacheHelper();
+      }
+
+      @Override
+      public Bits getLiveDocs() {
+        return null;
+      }
+
+      @Override
+      public int numDocs() {
+        return maxDoc();
+      }
+    });
+    TopDocs seq_id = searcher.search(IntPoint.newRangeQuery("seq_id", seqIds.intValue() - 50, Integer.MAX_VALUE), 10);
+    assertTrue(seq_id.totalHits + " hits", seq_id.totalHits >= 50);
+    searcher = new IndexSearcher(reader);
+    for (String id : ids) {
+      if (updateSeveralDocs) {
+        assertEquals(2, searcher.search(new TermQuery(new Term("id", id)), 10).totalHits);
+      } else {
+        assertEquals(1, searcher.search(new TermQuery(new Term("id", id)), 10).totalHits);
+      }
+    }
+    IOUtils.close(reader, writer, dir);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
index b08a85d..e6c91b8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
@@ -73,6 +73,7 @@ public class TestStressNRT extends LuceneTestCase {
     final int ndocs = atLeast(50);
     final int nWriteThreads = TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5);
     final int maxConcurrentCommits = TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 10 : 5);   // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max
+    final boolean useSoftDeletes = random().nextInt(10) < 3;
     
     final boolean tombstones = random().nextBoolean();
 
@@ -106,10 +107,10 @@ public class TestStressNRT extends LuceneTestCase {
 
     Directory dir = newMaybeVirusCheckingDirectory();
 
-    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())), useSoftDeletes);
     writer.setDoRandomForceMergeAssert(false);
     writer.commit();
-    reader = DirectoryReader.open(dir);
+    reader = useSoftDeletes ? writer.getReader() : DirectoryReader.open(dir);
 
     for (int i=0; i<nWriteThreads; i++) {
       Thread thread = new Thread("WRITER"+i) {
@@ -135,7 +136,7 @@ public class TestStressNRT extends LuceneTestCase {
                   }
 
                   DirectoryReader newReader;
-                  if (rand.nextInt(100) < softCommitPercent) {
+                  if (rand.nextInt(100) < softCommitPercent || useSoftDeletes) {
                     // assertU(h.commit("softCommit","true"));
                     if (random().nextBoolean()) {
                       if (VERBOSE) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
index 0574e70..0318109 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
@@ -67,7 +67,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(makeIDField("id0", 100));
     w.addDocument(doc);
@@ -192,7 +192,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     int minItemsInBlock = TestUtil.nextInt(random(), 2, 50);
     int maxItemsInBlock = 2*(minItemsInBlock-1) + random().nextInt(50);
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat(minItemsInBlock, maxItemsInBlock)));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     //IndexWriter w = new IndexWriter(dir, iwc);
     int numDocs = atLeast(1000);
     Map<String,Long> idValues = new HashMap<String,Long>();
@@ -359,7 +359,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(makeIDField("id", 17));
     w.addDocument(doc);
@@ -415,7 +415,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(makeIDField("id", 17));
     w.addDocument(doc);
@@ -432,7 +432,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(makeIDField("id", 17));
     w.addDocument(doc);
@@ -460,7 +460,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
       };
     IndexWriterConfig iwc = newIndexWriterConfig(a);
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(newTextField("id", "id", Field.Store.NO));
     expectThrows(IllegalArgumentException.class, () -> {
@@ -476,7 +476,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(newStringField("id", "id", Field.Store.NO));
     expectThrows(IllegalArgumentException.class, () -> {
@@ -493,7 +493,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(new StringAndPayloadField("id", "id", new BytesRef("foo")));
     expectThrows(IllegalArgumentException.class, () -> {
@@ -509,7 +509,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(makeIDField("id", 17));
     w.addDocument(doc);
@@ -529,7 +529,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
 
     FieldType ft = new FieldType(StringAndPayloadField.TYPE);
@@ -555,7 +555,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     doc.add(makeIDField("id", 17));
     doc.add(makeIDField("id", 17));
@@ -572,7 +572,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     // -1
     doc.add(new StringAndPayloadField("id", "id", new BytesRef(new byte[] {(byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff})));
@@ -590,7 +590,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
     Document doc = new Document();
     // Long.MAX_VALUE:
     doc.add(new StringAndPayloadField("id", "id", new BytesRef(new byte[] {(byte)0x7f, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff})));
@@ -610,7 +610,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
-    final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
 
     IDSource idsSource = getRandomIDs();
     int numIDs = atLeast(100);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
index f4abb54..e02164b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
@@ -82,7 +82,7 @@ public class AssertingLiveDocsFormat extends LiveDocsFormat {
         deletedCount++;
       }
     }
-    assert deletedCount == expectedDeleteCount;
+    assert deletedCount == expectedDeleteCount : "deleted: " + deletedCount + " != expected: " + expectedDeleteCount;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ecc17f90/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
index aa4da54..b82df68 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
@@ -18,12 +18,14 @@ package org.apache.lucene.index;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -41,13 +43,14 @@ import org.apache.lucene.util.TestUtil;
 
 public class RandomIndexWriter implements Closeable {
 
-  public IndexWriter w;
+  public final IndexWriter w;
   private final Random r;
   int docCount;
   int flushAt;
   private double flushAtFactor = 1.0;
   private boolean getReaderCalled;
   private final Analyzer analyzer; // only if WE created it (then we close it)
+  private final double softDeletesRatio;
 
   /** Returns an indexwriter that randomly mixes up thread scheduling (by yielding at test points) */
   public static IndexWriter mockIndexWriter(Directory dir, IndexWriterConfig conf, Random r) throws IOException {
@@ -94,7 +97,7 @@ public class RandomIndexWriter implements Closeable {
 
   /** create a RandomIndexWriter with a random config: Uses MockAnalyzer */
   public RandomIndexWriter(Random r, Directory dir) throws IOException {
-    this(r, dir, LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r)), true);
+    this(r, dir, LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r)), true, r.nextBoolean());
   }
   
   /** create a RandomIndexWriter with a random config */
@@ -104,12 +107,23 @@ public class RandomIndexWriter implements Closeable {
   
   /** create a RandomIndexWriter with the provided config */
   public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
-    this(r, dir, c, false);
+    this(r, dir, c, false, r.nextBoolean());
+  }
+
+  /** create a RandomIndexWriter with the provided config */
+  public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c, boolean useSoftDeletes) throws IOException {
+    this(r, dir, c, false, useSoftDeletes);
   }
       
-  private RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c, boolean closeAnalyzer) throws IOException {
+  private RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c, boolean closeAnalyzer, boolean useSoftDeletes) throws IOException {
     // TODO: this should be solved in a different way; Random should not be shared (!).
     this.r = new Random(r.nextLong());
+    if (useSoftDeletes) {
+      c.setSoftDeletesField("___soft_deletes");
+      softDeletesRatio = 1.d / (1 + r.nextInt(10)); // ratio in (0, 1], so soft deletes are applied probabilistically
+    } else {
+      softDeletesRatio = 0d;
+    }
     w = mockIndexWriter(dir, c, r);
     flushAt = TestUtil.nextInt(r, 10, 1000);
     if (closeAnalyzer) {
@@ -218,49 +232,39 @@ public class RandomIndexWriter implements Closeable {
 
   public long updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
-    long seqNo = w.updateDocuments(delTerm, docs);
+    long seqNo;
+    if (useSoftDeletes()) {
+      seqNo = w.softUpdateDocuments(delTerm, docs, new NumericDocValuesField(w.getConfig().getSoftDeletesField(), 1));
+    } else {
+      seqNo = w.updateDocuments(delTerm, docs);
+    }
     maybeFlushOrCommit();
     return seqNo;
   }
 
+  private boolean useSoftDeletes() {
+    return r.nextDouble() < softDeletesRatio;
+  }
+
   /**
    * Updates a document.
    * @see IndexWriter#updateDocument(Term, Iterable)
    */
   public <T extends IndexableField> long updateDocument(Term t, final Iterable<T> doc) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
-    long seqNo;
-    if (r.nextInt(5) == 3) {
-      seqNo = w.updateDocuments(t, new Iterable<Iterable<T>>() {
-
-        @Override
-        public Iterator<Iterable<T>> iterator() {
-          return new Iterator<Iterable<T>>() {
-            boolean done;
-            
-            @Override
-            public boolean hasNext() {
-              return !done;
-            }
-
-            @Override
-            public void remove() {
-              throw new UnsupportedOperationException();
-            }
-
-            @Override
-            public Iterable<T> next() {
-              if (done) {
-                throw new IllegalStateException();
-              }
-              done = true;
-              return doc;
-            }
-          };
-        }
-        });
+    final long seqNo;
+    if (useSoftDeletes()) {
+      if (r.nextInt(5) == 3) {
+        seqNo = w.softUpdateDocuments(t, Arrays.asList(doc), new NumericDocValuesField(w.getConfig().getSoftDeletesField(), 1));
+      } else {
+        seqNo = w.softUpdateDocument(t, doc, new NumericDocValuesField(w.getConfig().getSoftDeletesField(), 1));
+      }
     } else {
-      seqNo = w.updateDocument(t, doc);
+      if (r.nextInt(5) == 3) {
+        seqNo = w.updateDocuments(t, Arrays.asList(doc));
+      } else {
+        seqNo = w.updateDocument(t, doc);
+      }
     }
     maybeFlushOrCommit();
 
@@ -377,7 +381,8 @@ public class RandomIndexWriter implements Closeable {
     if (r.nextInt(20) == 2) {
       doRandomForceMerge();
     }
-    if (!applyDeletions || r.nextBoolean()) {
+    if (!applyDeletions || r.nextBoolean() || w.getConfig().getSoftDeletesField() != null) {
+      // if soft deletes are configured we can't open a reader from the directory; use the NRT reader
       if (LuceneTestCase.VERBOSE) {
         System.out.println("RIW.getReader: use NRT reader");
       }


[30/50] lucene-solr:jira/solr-12181: LUCENE-8239: remove unused import of @Ignore

Posted by ab...@apache.org.
LUCENE-8239: remove unused import of @Ignore


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/aba793de
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/aba793de
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/aba793de

Branch: refs/heads/jira/solr-12181
Commit: aba793def66628407f18979ff7c079e638724e97
Parents: 9009fe6
Author: broustant <br...@salesforce.com>
Authored: Thu Apr 5 16:53:42 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Thu Apr 5 16:53:42 2018 -0400

----------------------------------------------------------------------
 .../src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java   | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aba793de/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
index ee15ec4..581112d 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPolygonTest.java
@@ -21,7 +21,6 @@ import java.util.List;
 import java.util.BitSet;
 import java.util.Collections;
 
-import org.junit.Ignore;
 import org.junit.Test;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;


[28/50] lucene-solr:jira/solr-12181: SOLR-12183: Fix precommit

Posted by ab...@apache.org.
SOLR-12183: Fix precommit


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4137f320
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4137f320
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4137f320

Branch: refs/heads/jira/solr-12181
Commit: 4137f320aab4cb69ced9b8da352dd5ad5e1576c3
Parents: d2845b0
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Apr 5 13:24:52 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Apr 5 14:00:08 2018 -0400

----------------------------------------------------------------------
 .../apache/solr/client/solrj/io/stream/StreamExpressionTest.java    | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4137f320/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 487aca1..845703e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -22,7 +22,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 
 import org.apache.lucene.util.LuceneTestCase;


[10/50] lucene-solr:jira/solr-12181: Add 7.3.0 back-compat test indexes

Posted by ab...@apache.org.
Add 7.3.0 back-compat test indexes


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2ace16ce
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2ace16ce
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2ace16ce

Branch: refs/heads/jira/solr-12181
Commit: 2ace16cef7d329b5a2ac66945838a42e58311916
Parents: 469979d
Author: Alan Woodward <ro...@apache.org>
Authored: Wed Apr 4 22:46:17 2018 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Wed Apr 4 22:46:17 2018 +0100

----------------------------------------------------------------------
 .../lucene/index/TestBackwardsCompatibility.java  |   7 +++++--
 .../org/apache/lucene/index/index.7.3.0-cfs.zip   | Bin 0 -> 15538 bytes
 .../org/apache/lucene/index/index.7.3.0-nocfs.zip | Bin 0 -> 15548 bytes
 .../test/org/apache/lucene/index/sorted.7.3.0.zip | Bin 0 -> 393834 bytes
 4 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ace16ce/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 4b66f9d..e80bf9b 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -296,7 +296,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     "7.2.0-cfs",
     "7.2.0-nocfs",
     "7.2.1-cfs",
-    "7.2.1-nocfs"
+    "7.2.1-nocfs",
+    "7.3.0-cfs",
+    "7.3.0-nocfs"
   };
 
   public static String[] getOldNames() {
@@ -308,7 +310,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     "sorted.7.0.1",
     "sorted.7.1.0",
     "sorted.7.2.0",
-    "sorted.7.2.1"
+    "sorted.7.2.1",
+    "sorted.7.3.0"
   };
 
   public static String[] getOldSortedNames() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ace16ce/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-cfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-cfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-cfs.zip
new file mode 100644
index 0000000..c213632
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-cfs.zip differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ace16ce/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-nocfs.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-nocfs.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-nocfs.zip
new file mode 100644
index 0000000..c12ca5f
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/index.7.3.0-nocfs.zip differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2ace16ce/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.3.0.zip
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.3.0.zip b/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.3.0.zip
new file mode 100644
index 0000000..cc2b886
Binary files /dev/null and b/lucene/backward-codecs/src/test/org/apache/lucene/index/sorted.7.3.0.zip differ


[02/50] lucene-solr:jira/solr-12181: LUCENE-8236: Filter duplicated points when creating GeoPath shapes to avoid creation of bogus planes.

Posted by ab...@apache.org.
LUCENE-8236: Filter duplicated points when creating GeoPath shapes to avoid creation of bogus planes.
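
As a hedged illustration (the points and cutoff value here are made up; only GeoPathFactory.makeGeoPath is taken from the patch below), the change drops consecutive numerically-identical points before any planes are built:

    // Sketch: the duplicate point no longer yields a bogus (degenerate) plane.
    PlanetModel planetModel = PlanetModel.SPHERE;
    GeoPoint p1 = new GeoPoint(planetModel, 0.1, 0.2);
    GeoPoint p2 = new GeoPoint(planetModel, 0.1, 0.2); // numerically identical to p1
    GeoPoint p3 = new GeoPoint(planetModel, 0.3, 0.4);
    GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 0.5, new GeoPoint[] {p1, p2, p3});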


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/cf568904
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/cf568904
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/cf568904

Branch: refs/heads/jira/solr-12181
Commit: cf568904005ca203076186820b9a7876f7cb1882
Parents: 7117b68
Author: Ignacio Vera <iv...@apache.org>
Authored: Wed Apr 4 13:33:42 2018 +0200
Committer: Ignacio Vera <iv...@apache.org>
Committed: Wed Apr 4 13:33:42 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  3 +++
 .../lucene/spatial3d/geom/GeoPathFactory.java   | 22 +++++++++++++++++--
 .../lucene/spatial3d/geom/GeoPathTest.java      | 23 ++++++++++++++++++++
 3 files changed, 46 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cf568904/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 1f83980..95d8738 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -120,6 +120,9 @@ Bug Fixes
 * LUCENE-8234: Fixed bug in how spatial relationship is computed for
   GeoStandardCircle when it covers the whole world. (Ignacio Vera)
 
+* LUCENE-8236: Filter duplicated points when creating GeoPath shapes to
+  avoid creation of bogus planes. (Ignacio Vera)
+
 Other
 
 * LUCENE-8228: removed obsolete IndexDeletionPolicy clone() requirements from

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cf568904/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java
index 2ca132f..6389f57 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPathFactory.java
@@ -16,6 +16,9 @@
  */
 package org.apache.lucene.spatial3d.geom;
 
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Class which constructs a GeoPath representing an arbitrary path.
  *
@@ -34,9 +37,24 @@ public class GeoPathFactory {
    */
   public static GeoPath makeGeoPath(final PlanetModel planetModel, final double maxCutoffAngle, final GeoPoint[] pathPoints) {
     if (maxCutoffAngle < Vector.MINIMUM_ANGULAR_RESOLUTION) {
-      return new GeoDegeneratePath(planetModel, pathPoints);
+      return new GeoDegeneratePath(planetModel, filterPoints(pathPoints));
+    }
+    return new GeoStandardPath(planetModel, maxCutoffAngle, filterPoints(pathPoints));
+  }
+
+  /** Filter consecutive numerically-identical points.
+   * @param pathPoints the array of path points.
+   * @return the filtered array.
+   */
+  private static GeoPoint[] filterPoints(final GeoPoint[] pathPoints) {
+    final List<GeoPoint> noIdenticalPoints = new ArrayList<>(pathPoints.length);
+    for (int i = 0; i < pathPoints.length - 1; i++) {
+      if (!pathPoints[i].isNumericallyIdentical(pathPoints[i + 1])) {
+        noIdenticalPoints.add(pathPoints[i]);
+      }
     }
-    return new GeoStandardPath(planetModel, maxCutoffAngle, pathPoints);
+    noIdenticalPoints.add(pathPoints[pathPoints.length - 1]);
+    return noIdenticalPoints.toArray(new GeoPoint[noIdenticalPoints.size()]);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cf568904/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPathTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPathTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPathTest.java
index f6d14f2..93f90f4 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPathTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoPathTest.java
@@ -379,4 +379,27 @@ public class GeoPathTest {
 
   }
 
+  @Test
+  public void testIdenticalPoints() {
+    PlanetModel planetModel = PlanetModel.WGS84;
+    GeoPoint point1 = new GeoPoint(planetModel, 1.5707963267948963, -2.4818290647609542E-148);
+    GeoPoint point2 = new GeoPoint(planetModel, 1.570796326794895, -3.5E-323);
+    GeoPoint point3 = new GeoPoint(planetModel,4.4E-323, -3.1415926535897896);
+    GeoPath path = GeoPathFactory.makeGeoPath(planetModel, 0, new GeoPoint[] {point1, point2, point3});
+    GeoPoint point = new GeoPoint(planetModel, -1.5707963267948952,2.369064805649877E-284);
+    // If not filtered, the point is wrongly reported as within the path
+    assertFalse(path.isWithin(point));
+    // If not filtered, path construction throws an error
+    path = GeoPathFactory.makeGeoPath(planetModel, 1e-6, new GeoPoint[] {point1, point2, point3});
+    assertFalse(path.isWithin(point));
+
+    GeoPoint point4 = new GeoPoint(planetModel, 1.5, 0);
+    GeoPoint point5 = new GeoPoint(planetModel, 1.5, 0);
+    GeoPoint point6 = new GeoPoint(planetModel,4.4E-323, -3.1415926535897896);
+    // If not filtered, this creates a degenerate Vector
+    path = GeoPathFactory.makeGeoPath(planetModel, 0, new GeoPoint[] {point4, point5, point6});
+    path = GeoPathFactory.makeGeoPath(planetModel, 0.5, new GeoPoint[] {point4, point5, point6});
+
+  }
+
 }


[20/50] lucene-solr:jira/solr-12181: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr

Posted by ab...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f1d69112
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f1d69112
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f1d69112

Branch: refs/heads/jira/solr-12181
Commit: f1d691127232690589bfef6bbb26c15b6e1a152c
Parents: 74c2b79 8e7b1b2
Author: Karl Wright <Da...@gmail.com>
Authored: Thu Apr 5 13:56:31 2018 -0400
Committer: Karl Wright <Da...@gmail.com>
Committed: Thu Apr 5 13:56:31 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                  | 3 +++
 solr/solr-ref-guide/src/README.md | 3 ---
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------



[40/50] lucene-solr:jira/solr-12181: LUCENE-8243: fix IndexWriter.addIndexes(Directory[]) to properly preserve index file names for updated doc values fields

Posted by ab...@apache.org.
LUCENE-8243: fix IndexWriter.addIndexes(Directory[]) to properly preserve index file names for updated doc values fields
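
A minimal sketch of the failure scenario, assuming two directories dir1/dir2 and a document doc carrying a "soft_delete" doc-values field (all illustrative; the calls mirror the test added below):

    // Sketch: doc-values updates must survive IndexWriter.addIndexes(Directory...).
    IndexWriter w1 = new IndexWriter(dir1, new IndexWriterConfig());
    w1.addDocument(doc);
    w1.updateDocValues(new Term("id", "1"),
        new NumericDocValuesField("soft_delete", 1)); // writes a doc-values update generation
    w1.commit();
    w1.close();

    IndexWriter w2 = new IndexWriter(dir2, new IndexWriterConfig());
    w2.addIndexes(dir1); // before this fix, file names for the updated DV field were not preserved
    w2.commit();
    w2.close();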


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/99364584
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/99364584
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/99364584

Branch: refs/heads/jira/solr-12181
Commit: 99364584fffcb8352ca9cd566c3044ed04732582
Parents: bd8fe72
Author: Mike McCandless <mi...@apache.org>
Authored: Sun Apr 8 11:14:51 2018 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Sun Apr 8 11:15:21 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  4 +
 .../org/apache/lucene/index/IndexWriter.java    |  9 ++-
 .../org/apache/lucene/index/TestAddIndexes.java | 81 ++++++++++++++++++++
 3 files changed, 91 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/99364584/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index f90f9e3..1baeb7f 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -129,6 +129,10 @@ Bug Fixes
 * LUCENE-8236: Filter duplicated points when creating GeoPath shapes to
   avoid creation of bogus planes. (Ignacio Vera)
 
+* LUCENE-8243: IndexWriter.addIndexes(Directory[]) did not properly preserve
+  index file names for updated doc values fields (Simon Willnauer,
+  Michael McCandless)
+
 Other
 
 * LUCENE-8228: removed obsolete IndexDeletionPolicy clone() requirements from

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/99364584/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 4305176..8ba460d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -3207,8 +3207,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
                                           info.info.getDiagnostics(), info.info.getId(), info.info.getAttributes(), info.info.getIndexSort());
     SegmentCommitInfo newInfoPerCommit = new SegmentCommitInfo(newInfo, info.getDelCount(), info.getDelGen(), 
                                                                info.getFieldInfosGen(), info.getDocValuesGen());
-    
-    newInfo.setFiles(info.files());
+
+    newInfo.setFiles(info.info.files());
+    newInfoPerCommit.setFieldInfosFiles(info.getFieldInfosFiles());
+    newInfoPerCommit.setDocValuesUpdatesFiles(info.getDocValuesUpdatesFiles());
 
     boolean success = false;
 
@@ -3228,7 +3230,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       }
     }
 
-    assert copiedFiles.equals(newInfoPerCommit.files());
+    assert copiedFiles.equals(newInfoPerCommit.files()): "copiedFiles=" + copiedFiles + " vs " + newInfoPerCommit.files();
     
     return newInfoPerCommit;
   }
@@ -3569,6 +3571,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     return seqNo;
   }
 
+  @SuppressWarnings("try")
   private final void finishCommit() throws IOException {
 
     boolean commitCompleted = false;
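
(The substance of the fix is in the segment-copying code above: the new SegmentInfo is now given only the core segment files, info.info.files(), while the generation-based field-infos and doc-values-update files are carried over on the new SegmentCommitInfo, where they are tracked per commit. Previously newInfo.setFiles(info.files()) folded the whole per-commit file set, update files included, into the SegmentInfo itself, so the generation tracking for those files was lost on copy; the strengthened assert now prints both file sets when they disagree.)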

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/99364584/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 876328a..48a28e2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -1332,4 +1332,85 @@ public class TestAddIndexes extends LuceneTestCase {
     assertEquals("cannot change index sort from <int: \"foo\"> to <string: \"foo\">", message);
     IOUtils.close(r1, dir1, w2, dir2);
   }
+
+  public void testAddIndexesDVUpdateSameSegmentName() throws Exception {
+    Directory dir1 = newDirectory();
+    IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriter w1 = new IndexWriter(dir1, iwc1);
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    doc.add(new NumericDocValuesField("soft_delete", 1));
+    w1.addDocument(doc);
+    w1.flush();
+
+    w1.updateDocValues(new Term("id", "1"), new NumericDocValuesField("soft_delete", 1));
+    w1.commit();
+    w1.close();
+
+    IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
+    Directory dir2 = newDirectory();
+    IndexWriter w2 = new IndexWriter(dir2, iwc2);
+    w2.addIndexes(dir1);
+    w2.commit();
+    w2.close();
+
+    if (VERBOSE) {
+      System.out.println("\nTEST: now open w3");
+    }
+    IndexWriterConfig iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+    if (VERBOSE) {
+      iwc3.setInfoStream(System.out);
+    }        
+    IndexWriter w3 = new IndexWriter(dir2, iwc3);
+    w3.close();
+
+    iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+    w3 = new IndexWriter(dir2, iwc3);
+    w3.close();
+    dir1.close();
+    dir2.close();
+  }
+
+  public void testAddIndexesDVUpdateNewSegmentName() throws Exception {
+    Directory dir1 = newDirectory();
+    IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriter w1 = new IndexWriter(dir1, iwc1);
+    Document doc = new Document();
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    doc.add(new StringField("version", "1", Field.Store.YES));
+    doc.add(new NumericDocValuesField("soft_delete", 1));
+    w1.addDocument(doc);
+    w1.flush();
+
+    w1.updateDocValues(new Term("id", "1"), new NumericDocValuesField("soft_delete", 1));
+    w1.commit();
+    w1.close();
+
+    IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
+    Directory dir2 = newDirectory();
+    IndexWriter w2 = new IndexWriter(dir2, iwc2);
+    w2.addDocument(new Document());
+    w2.commit();
+    
+    w2.addIndexes(dir1);
+    w2.commit();
+    w2.close();
+
+    if (VERBOSE) {
+      System.out.println("\nTEST: now open w3");
+    }
+    IndexWriterConfig iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+    if (VERBOSE) {
+      iwc3.setInfoStream(System.out);
+    }        
+    IndexWriter w3 = new IndexWriter(dir2, iwc3);
+    w3.close();
+
+    iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+    w3 = new IndexWriter(dir2, iwc3);
+    w3.close();
+    dir1.close();
+    dir2.close();
+  }
 }
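
For context, the regression the new tests pin down can be reduced to a small
standalone program along these lines. This is a condensed sketch of
testAddIndexesDVUpdateSameSegmentName, not code from the commit: the index
paths are placeholders and error handling is elided.

    import java.nio.file.Paths;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class AddIndexesDvUpdateSketch {
      public static void main(String[] args) throws Exception {
        try (Directory dir1 = FSDirectory.open(Paths.get("index1"));
             Directory dir2 = FSDirectory.open(Paths.get("index2"))) {
          IndexWriter w1 = new IndexWriter(dir1, new IndexWriterConfig(new StandardAnalyzer()));
          Document doc = new Document();
          doc.add(new StringField("id", "1", Field.Store.YES));
          doc.add(new NumericDocValuesField("soft_delete", 0));
          w1.addDocument(doc);
          w1.commit();
          // The in-place update writes a new doc-values generation next to
          // the original segment files.
          w1.updateDocValues(new Term("id", "1"), new NumericDocValuesField("soft_delete", 1));
          w1.commit();
          w1.close();

          IndexWriter w2 = new IndexWriter(dir2, new IndexWriterConfig(new StandardAnalyzer()));
          // Before the fix, the copied segment dropped the generation-based
          // field-infos and doc-values update file names here, so reopening
          // dir2 could look for files that were never copied.
          w2.addIndexes(dir1);
          w2.commit();
          w2.close();

          // Reopening the destination index is what tripped the bug.
          new IndexWriter(dir2, new IndexWriterConfig(new StandardAnalyzer())).close();
        }
      }
    }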


[47/50] lucene-solr:jira/solr-12181: SOLR-12096: Fixed the inconsistent result format of the subquery transformer for distributed (multi-shard) search

Posted by ab...@apache.org.
SOLR-12096: Fixed the inconsistent result format of the subquery transformer for distributed (multi-shard) search


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ea08bd3b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ea08bd3b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ea08bd3b

Branch: refs/heads/jira/solr-12181
Commit: ea08bd3b67ff6b35d6264054d2131a87bbe9b870
Parents: a7a3c0a
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Mon Apr 9 16:36:07 2018 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Mon Apr 9 16:36:07 2018 +0530

----------------------------------------------------------------------
 SOLR-12096.patch                                | 217 +++++++++++++++++++
 solr/CHANGES.txt                                |   3 +
 .../solr/response/GeoJSONResponseWriter.java    |   3 +-
 .../solr/response/JSONResponseWriter.java       |   6 +-
 .../apache/solr/response/JSONWriterTest.java    |  24 +-
 .../TestSubQueryTransformerDistrib.java         |  59 +++--
 6 files changed, 289 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea08bd3b/SOLR-12096.patch
----------------------------------------------------------------------
diff --git a/SOLR-12096.patch b/SOLR-12096.patch
new file mode 100644
index 0000000..9ed1ad7
--- /dev/null
+++ b/SOLR-12096.patch
@@ -0,0 +1,217 @@
+diff --git a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
+index 43fd7b4..012290e 100644
+--- a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
++++ b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
+@@ -166,7 +166,8 @@ class GeoJSONWriter extends JSONWriter {
+ 
+       // SolrDocument will now have multiValued fields represented as a Collection,
+       // even if only a single value is returned for this document.
+-      if (val instanceof List) {
++      // For SolrDocumentList, use writeVal instead of writeArray
++      if (!(val instanceof SolrDocumentList) && val instanceof List) {
+         // shortcut this common case instead of going through writeVal again
+         writeArray(name,((Iterable)val).iterator());
+       } else {
+diff --git a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
+index 513df4e..5f6e2f2 100644
+--- a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
++++ b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
+@@ -25,10 +25,11 @@ import java.util.Map;
+ import java.util.Set;
+ 
+ import org.apache.solr.common.IteratorWriter;
++import org.apache.solr.common.MapWriter;
+ import org.apache.solr.common.MapWriter.EntryWriter;
+ import org.apache.solr.common.PushWriter;
+ import org.apache.solr.common.SolrDocument;
+-import org.apache.solr.common.MapWriter;
++import org.apache.solr.common.SolrDocumentList;
+ import org.apache.solr.common.params.SolrParams;
+ import org.apache.solr.common.util.NamedList;
+ import org.apache.solr.common.util.SimpleOrderedMap;
+@@ -367,7 +368,8 @@ class JSONWriter extends TextResponseWriter {
+ 
+       // SolrDocument will now have multiValued fields represented as a Collection,
+       // even if only a single value is returned for this document.
+-      if (val instanceof List) {
++      // For SolrDocumentList, use writeVal instead of writeArray
++      if (!(val instanceof SolrDocumentList) && val instanceof List) {
+         // shortcut this common case instead of going through writeVal again
+         writeArray(name,((Iterable)val).iterator());
+       } else {
+diff --git a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
+index 1b53150..68cebd2 100644
+--- a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
++++ b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
+@@ -22,7 +22,10 @@ import java.lang.reflect.Method;
+ import java.lang.reflect.Modifier;
+ import java.nio.charset.StandardCharsets;
+ import java.util.ArrayList;
++import java.util.Arrays;
+ import java.util.List;
++
++import org.apache.solr.JSONTestUtil;
+ import org.apache.solr.SolrTestCaseJ4;
+ import org.apache.solr.common.SolrDocument;
+ import org.apache.solr.common.SolrDocumentList;
+@@ -130,9 +133,9 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
+   }
+ 
+   @Test
+-  public void testJSONSolrDocument() throws IOException {
++  public void testJSONSolrDocument() throws Exception {
+     SolrQueryRequest req = req(CommonParams.WT,"json",
+-                               CommonParams.FL,"id,score");
++                               CommonParams.FL,"id,score,_children_,path");
+     SolrQueryResponse rsp = new SolrQueryResponse();
+     JSONResponseWriter w = new JSONResponseWriter();
+ 
+@@ -141,11 +144,22 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
+ 
+     StringWriter buf = new StringWriter();
+ 
++    SolrDocument childDoc = new SolrDocument();
++    childDoc.addField("id", "2");
++    childDoc.addField("score", "0.4");
++    childDoc.addField("path", Arrays.asList("a>b", "a>b>c"));
++
++    SolrDocumentList childList = new SolrDocumentList();
++    childList.setNumFound(1);
++    childList.setStart(0);
++    childList.add(childDoc);
++
+     SolrDocument solrDoc = new SolrDocument();
+     solrDoc.addField("id", "1");
+     solrDoc.addField("subject", "hello2");
+     solrDoc.addField("title", "hello3");
+     solrDoc.addField("score", "0.7");
++    solrDoc.setField("_children_", childList);
+ 
+     SolrDocumentList list = new SolrDocumentList();
+     list.setNumFound(1);
+@@ -163,8 +177,12 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
+                 result.contains("\"title\""));
+     assertTrue("response doesn't contain expected fields: " + result, 
+                result.contains("\"id\"") &&
+-               result.contains("\"score\""));
++               result.contains("\"score\"") && result.contains("_children_"));
+ 
++    String expectedResult = "{'response':{'numFound':1,'start':0,'maxScore':0.7,'docs':[{'id':'1', 'score':'0.7'," +
++        " '_children_':{'numFound':1,'start':0,'docs':[{'id':'2', 'score':'0.4', 'path':['a>b', 'a>b>c']}] }}] }}";
++    String error = JSONTestUtil.match(result, "=="+expectedResult);
++    assertNull("response validation failed with error: " + error, error);
+ 
+     req.close();
+   }
+diff --git a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
+index 620cac0..f6d0a38 100644
+--- a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
++++ b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
+@@ -16,7 +16,11 @@
+  */
+ package org.apache.solr.response.transform;
+ 
++import java.io.ByteArrayOutputStream;
+ import java.io.IOException;
++import java.io.InputStream;
++import java.net.URL;
++import java.nio.charset.Charset;
+ import java.nio.file.Path;
+ import java.nio.file.Paths;
+ import java.util.ArrayList;
+@@ -26,6 +30,8 @@ import java.util.List;
+ import java.util.Map;
+ import java.util.Random;
+ 
++import org.apache.commons.io.IOUtils;
++import org.apache.solr.JSONTestUtil;
+ import org.apache.solr.client.solrj.SolrServerException;
+ import org.apache.solr.client.solrj.impl.CloudSolrClient;
+ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+@@ -37,10 +43,12 @@ import org.apache.solr.cloud.SolrCloudTestCase;
+ import org.apache.solr.common.SolrDocument;
+ import org.apache.solr.common.SolrDocumentList;
+ import org.apache.solr.common.cloud.ZkStateReader;
++import org.apache.solr.common.params.ModifiableSolrParams;
+ import org.apache.solr.common.util.ContentStreamBase;
+ import org.junit.BeforeClass;
+ import org.junit.Test;
+ 
++@org.apache.solr.SolrTestCaseJ4.SuppressSSL()
+ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
+   
+   private static final String support = "These guys help customers";
+@@ -92,7 +100,7 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
+   
+   @SuppressWarnings("serial")
+   @Test
+-  public void test() throws SolrServerException, IOException {
++  public void test() throws Exception {
+     int peopleMultiplier = atLeast(1);
+     int deptMultiplier = atLeast(1);
+     
+@@ -100,24 +108,26 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
+     
+     Random random1 = random();
+     
++    final ModifiableSolrParams params = params(
++        new String[]{"q","name_s:dave", "indent","true",
++            "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]",
++            "rows","" + peopleMultiplier,
++            "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}",
++            "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
++            "depts.indent","true",
++            "depts.collection","departments",
++            differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
++            "depts.rows",""+(deptMultiplier*2),
++            "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
++            random().nextBoolean()?"depts.wt":"whatever",anyWt(),
++            random().nextBoolean()?"wt":"whatever",anyWt()});
++
++    final SolrDocumentList hits;
+     {
+-     
+-      final QueryRequest  qr = new QueryRequest(params(
+-          new String[]{"q","name_s:dave", "indent","true",
+-          "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]", 
+-          "rows","" + peopleMultiplier,
+-          "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}", 
+-          "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
+-          "depts.indent","true",
+-          "depts.collection","departments",
+-          differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
+-          "depts.rows",""+(deptMultiplier*2),
+-          "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
+-          random().nextBoolean()?"depts.wt":"whatever",anyWt(),
+-          random().nextBoolean()?"wt":"whatever",anyWt()}));
++      final QueryRequest qr = new QueryRequest(params);
+       final QueryResponse  rsp = new QueryResponse();
+-      rsp.setResponse(cluster.getSolrClient().request(qr, people));
+-      final SolrDocumentList hits = rsp.getResults();
++      rsp.setResponse(cluster.getSolrClient().request(qr, people+","+depts));
++      hits = rsp.getResults();
+       
+       assertEquals(peopleMultiplier, hits.getNumFound());
+       
+@@ -140,6 +150,21 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
+       }
+       assertEquals(hits.toString(), engineerCount, supportCount); 
+     }
++
++    params.set("wt", "json");
++    final URL node = new URL(cluster.getRandomJetty(random()).getBaseUrl().toString()
++     +"/"+people+"/select"+params.toQueryString());
++
++    try(final InputStream jsonResponse = node.openStream()){
++      final ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
++      IOUtils.copy(jsonResponse, outBuffer);
++
++      final Object expected = ((SolrDocumentList) hits.get(0).getFieldValue("depts")).get(0).get("text_t");
++      final String err = JSONTestUtil.match("/response/docs/[0]/depts/docs/[0]/text_t"
++          ,outBuffer.toString(Charset.forName("UTF-8").toString()),
++          "\""+expected+"\"");
++      assertNull(err,err);
++    }
+     
+   }
+ 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea08bd3b/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f910224..9b99055 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -114,6 +114,9 @@ Bug Fixes
 * SOLR-12199: TestReplicationHandler.doTestRepeater(): TEST_PORT interpolation failure: 
   Server refused connection at: http://127.0.0.1:TEST_PORT/solr  (Mikhail Khludnev, Dawid Weiss, Steve Rowe)
 
+* SOLR-12096: Fixed inconsistent results format of subquery transformer for distributed search (multi-shard).
+  (Munendra S N, Mikhail Khludnev via Ishan Chattopadhyaya)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea08bd3b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
index 43fd7b4..012290e 100644
--- a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
@@ -166,7 +166,8 @@ class GeoJSONWriter extends JSONWriter {
 
       // SolrDocument will now have multiValued fields represented as a Collection,
       // even if only a single value is returned for this document.
-      if (val instanceof List) {
+      // For SolrDocumentList, use writeVal instead of writeArray
+      if (!(val instanceof SolrDocumentList) && val instanceof List) {
         // shortcut this common case instead of going through writeVal again
         writeArray(name,((Iterable)val).iterator());
       } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea08bd3b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
index 513df4e..5f6e2f2 100644
--- a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
@@ -25,10 +25,11 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.solr.common.IteratorWriter;
+import org.apache.solr.common.MapWriter;
 import org.apache.solr.common.MapWriter.EntryWriter;
 import org.apache.solr.common.PushWriter;
 import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.MapWriter;
+import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
@@ -367,7 +368,8 @@ class JSONWriter extends TextResponseWriter {
 
       // SolrDocument will now have multiValued fields represented as a Collection,
       // even if only a single value is returned for this document.
-      if (val instanceof List) {
+      // For SolrDocumentList, use writeVal instead of writeArray
+      if (!(val instanceof SolrDocumentList) && val instanceof List) {
         // shortcut this common case instead of going through writeVal again
         writeArray(name,((Iterable)val).iterator());
       } else {
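
(Both GeoJSONWriter and JSONWriter get the same guard: SolrDocumentList extends ArrayList<SolrDocument>, so the existing instanceof List shortcut serialized a nested document list as a bare JSON array, losing numFound, start and maxScore. Excluding SolrDocumentList sends it back through writeVal, which emits the full {numFound, start, docs} structure that single-shard responses already produced.)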

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea08bd3b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
index 1b53150..68cebd2 100644
--- a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
+++ b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
@@ -22,7 +22,10 @@ import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
+
+import org.apache.solr.JSONTestUtil;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
@@ -130,9 +133,9 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
   }
 
   @Test
-  public void testJSONSolrDocument() throws IOException {
+  public void testJSONSolrDocument() throws Exception {
     SolrQueryRequest req = req(CommonParams.WT,"json",
-                               CommonParams.FL,"id,score");
+                               CommonParams.FL,"id,score,_children_,path");
     SolrQueryResponse rsp = new SolrQueryResponse();
     JSONResponseWriter w = new JSONResponseWriter();
 
@@ -141,11 +144,22 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
 
     StringWriter buf = new StringWriter();
 
+    SolrDocument childDoc = new SolrDocument();
+    childDoc.addField("id", "2");
+    childDoc.addField("score", "0.4");
+    childDoc.addField("path", Arrays.asList("a>b", "a>b>c"));
+
+    SolrDocumentList childList = new SolrDocumentList();
+    childList.setNumFound(1);
+    childList.setStart(0);
+    childList.add(childDoc);
+
     SolrDocument solrDoc = new SolrDocument();
     solrDoc.addField("id", "1");
     solrDoc.addField("subject", "hello2");
     solrDoc.addField("title", "hello3");
     solrDoc.addField("score", "0.7");
+    solrDoc.setField("_children_", childList);
 
     SolrDocumentList list = new SolrDocumentList();
     list.setNumFound(1);
@@ -163,8 +177,12 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
                 result.contains("\"title\""));
     assertTrue("response doesn't contain expected fields: " + result, 
                result.contains("\"id\"") &&
-               result.contains("\"score\""));
+               result.contains("\"score\"") && result.contains("_children_"));
 
+    String expectedResult = "{'response':{'numFound':1,'start':0,'maxScore':0.7,'docs':[{'id':'1', 'score':'0.7'," +
+        " '_children_':{'numFound':1,'start':0,'docs':[{'id':'2', 'score':'0.4', 'path':['a>b', 'a>b>c']}] }}] }}";
+    String error = JSONTestUtil.match(result, "=="+expectedResult);
+    assertNull("response validation failed with error: " + error, error);
 
     req.close();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea08bd3b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
index 620cac0..f6d0a38 100644
--- a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
+++ b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
@@ -16,7 +16,11 @@
  */
 package org.apache.solr.response.transform;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.nio.charset.Charset;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
@@ -26,6 +30,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.solr.JSONTestUtil;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -37,10 +43,12 @@ import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.ContentStreamBase;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+@org.apache.solr.SolrTestCaseJ4.SuppressSSL()
 public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
   
   private static final String support = "These guys help customers";
@@ -92,7 +100,7 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
   
   @SuppressWarnings("serial")
   @Test
-  public void test() throws SolrServerException, IOException {
+  public void test() throws Exception {
     int peopleMultiplier = atLeast(1);
     int deptMultiplier = atLeast(1);
     
@@ -100,24 +108,26 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
     
     Random random1 = random();
     
+    final ModifiableSolrParams params = params(
+        new String[]{"q","name_s:dave", "indent","true",
+            "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]",
+            "rows","" + peopleMultiplier,
+            "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}",
+            "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
+            "depts.indent","true",
+            "depts.collection","departments",
+            differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
+            "depts.rows",""+(deptMultiplier*2),
+            "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
+            random().nextBoolean()?"depts.wt":"whatever",anyWt(),
+            random().nextBoolean()?"wt":"whatever",anyWt()});
+
+    final SolrDocumentList hits;
     {
-     
-      final QueryRequest  qr = new QueryRequest(params(
-          new String[]{"q","name_s:dave", "indent","true",
-          "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]", 
-          "rows","" + peopleMultiplier,
-          "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}", 
-          "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
-          "depts.indent","true",
-          "depts.collection","departments",
-          differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
-          "depts.rows",""+(deptMultiplier*2),
-          "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
-          random().nextBoolean()?"depts.wt":"whatever",anyWt(),
-          random().nextBoolean()?"wt":"whatever",anyWt()}));
+      final QueryRequest qr = new QueryRequest(params);
       final QueryResponse  rsp = new QueryResponse();
-      rsp.setResponse(cluster.getSolrClient().request(qr, people));
-      final SolrDocumentList hits = rsp.getResults();
+      rsp.setResponse(cluster.getSolrClient().request(qr, people+","+depts));
+      hits = rsp.getResults();
       
       assertEquals(peopleMultiplier, hits.getNumFound());
       
@@ -140,6 +150,21 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
       }
       assertEquals(hits.toString(), engineerCount, supportCount); 
     }
+
+    params.set("wt", "json");
+    final URL node = new URL(cluster.getRandomJetty(random()).getBaseUrl().toString()
+     +"/"+people+"/select"+params.toQueryString());
+
+    try(final InputStream jsonResponse = node.openStream()){
+      final ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+      IOUtils.copy(jsonResponse, outBuffer);
+
+      final Object expected = ((SolrDocumentList) hits.get(0).getFieldValue("depts")).get(0).get("text_t");
+      final String err = JSONTestUtil.match("/response/docs/[0]/depts/docs/[0]/text_t"
+          ,outBuffer.toString(Charset.forName("UTF-8").toString()),
+          "\""+expected+"\"");
+      assertNull(err,err);
+    }
     
   }
 

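As a usage sketch, the request shape exercised above maps to SolrJ roughly as
follows. The collection and field names come from the test fixtures; the base
URL is a placeholder, and this is an illustration rather than code from the
patch.

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.response.QueryResponse;
    import org.apache.solr.common.SolrDocument;
    import org.apache.solr.common.SolrDocumentList;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class SubqueryTransformerSketch {
      public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("q", "name_s:dave");
          // Attach per-row subquery results under the "depts" pseudo-field.
          params.set("fl", "*,depts:[subquery]");
          params.set("depts.q", "{!terms f=dept_id_s v=$row.dept_ss_dv}");
          params.set("depts.fl", "text_t");
          params.set("depts.collection", "departments");
          QueryResponse rsp = client.query("people", params);
          for (SolrDocument doc : rsp.getResults()) {
            // With this fix, "depts" arrives as a SolrDocumentList
            // (numFound/start/docs) from multi-shard collections too,
            // instead of sometimes degrading to a plain list.
            SolrDocumentList depts = (SolrDocumentList) doc.getFieldValue("depts");
            System.out.println(doc.getFieldValue("id") + " -> " + depts.getNumFound() + " depts");
          }
        }
      }
    }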

[42/50] lucene-solr:jira/solr-12181: LUCENE-8243: Add the original reporter to the attribution list in CHANGES.txt

Posted by ab...@apache.org.
LUCENE-8243: Add the original reporter to the attribution list in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a49543b4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a49543b4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a49543b4

Branch: refs/heads/jira/solr-12181
Commit: a49543b48aaec44c0735f6f41e6e455354720b9c
Parents: 3530397
Author: Simon Willnauer <si...@apache.org>
Authored: Mon Apr 9 09:16:30 2018 +0200
Committer: Simon Willnauer <si...@apache.org>
Committed: Mon Apr 9 09:16:30 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a49543b4/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 1baeb7f..74efacc 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -131,7 +131,7 @@ Bug Fixes
 
 * LUCENE-8243: IndexWriter.addIndexes(Directory[]) did not properly preserve
   index file names for updated doc values fields (Simon Willnauer,
-  Michael McCandless)
+  Michael McCandless, Nhat Nguyen)
 
 Other
 


[08/50] lucene-solr:jira/solr-12181: SOLR-7887: Fix Maven compilation by turning off annotation processing

Posted by ab...@apache.org.
SOLR-7887: Fix Maven compilation by turning off annotation processing


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ef902f9d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ef902f9d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ef902f9d

Branch: refs/heads/jira/solr-12181
Commit: ef902f9d8e5808cb874604990c7df1230e51f28c
Parents: 3e17933
Author: Steve Rowe <sa...@apache.org>
Authored: Wed Apr 4 14:45:09 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Wed Apr 4 14:45:09 2018 -0400

----------------------------------------------------------------------
 dev-tools/maven/pom.xml.template | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ef902f9d/dev-tools/maven/pom.xml.template
----------------------------------------------------------------------
diff --git a/dev-tools/maven/pom.xml.template b/dev-tools/maven/pom.xml.template
index 15b481c..4e21ca0 100644
--- a/dev-tools/maven/pom.xml.template
+++ b/dev-tools/maven/pom.xml.template
@@ -191,6 +191,10 @@
           <configuration>
             <source>${java.compat.version}</source>
             <target>${java.compat.version}</target>
+            <compilerArgs>
+              <!-- -proc:none was added because of LOG4J2-1925, JDK-8186647, https://github.com/apache/zookeeper/pull/317, JDK-8055048 -->
+              <arg>-proc:none</arg>
+            </compilerArgs>
           </configuration>
         </plugin>
         <plugin>
@@ -234,6 +238,7 @@
             <quiet>true</quiet>
             <additionalparam>-Xdoclint:all</additionalparam>
             <additionalparam>-Xdoclint:-missing</additionalparam>
+            <additionalparam>-proc:none</additionalparam>
           </configuration>
         </plugin>
         <plugin>
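
(By default javac discovers and runs any annotation processors found on the compile classpath; -proc:none disables that discovery, so processors shipped inside dependencies, such as the log4j plugin processor behind LOG4J2-1925, are no longer triggered during the Maven build. The second hunk passes the same flag through the javadoc plugin configuration for the same reason.)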


[22/50] lucene-solr:jira/solr-12181: SOLR-12183: Remove dead code

Posted by ab...@apache.org.
SOLR-12183: Remove dead code


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c58516ed
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c58516ed
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c58516ed

Branch: refs/heads/jira/solr-12181
Commit: c58516edf1b1525e1341f1427cae066b105c9047
Parents: 80375ac
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Apr 5 12:38:41 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Apr 5 14:00:07 2018 -0400

----------------------------------------------------------------------
 .../solrj/io/stream/StreamExpressionTest.java   | 52 --------------------
 1 file changed, 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c58516ed/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 4d88b4e..487aca1 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -2493,41 +2493,6 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     return true;
   }
 
-  protected boolean assertFields(List<Tuple> tuples, String ... fields) throws Exception{
-    for(Tuple tuple : tuples){
-      for(String field : fields){
-        if(!tuple.fields.containsKey(field)){
-          throw new Exception(String.format(Locale.ROOT, "Expected field '%s' not found", field));
-        }
-      }
-    }
-    return true;
-  }
-  protected boolean assertNotFields(List<Tuple> tuples, String ... fields) throws Exception{
-    for(Tuple tuple : tuples){
-      for(String field : fields){
-        if(tuple.fields.containsKey(field)){
-          throw new Exception(String.format(Locale.ROOT, "Unexpected field '%s' found", field));
-        }
-      }
-    }
-    return true;
-  }  
-
-  protected boolean assertGroupOrder(Tuple tuple, int... ids) throws Exception {
-    List<?> group = (List<?>)tuple.get("tuples");
-    int i=0;
-    for(int val : ids) {
-      Map<?,?> t = (Map<?,?>)group.get(i);
-      Long tip = (Long)t.get("id");
-      if(tip.intValue() != val) {
-        throw new Exception("Found value:"+tip.intValue()+" expecting:"+val);
-      }
-      ++i;
-    }
-    return true;
-  }
-
   public boolean assertLong(Tuple tuple, String fieldName, long l) throws Exception {
     long lv = (long)tuple.get(fieldName);
     if(lv != l) {
@@ -2566,23 +2531,6 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     return true;
   }
 
-  private boolean assertList(List list, Object... vals) throws Exception {
-
-    if(list.size() != vals.length) {
-      throw new Exception("Lists are not the same size:"+list.size() +" : "+vals.length);
-    }
-
-    for(int i=0; i<list.size(); i++) {
-      Object a = list.get(i);
-      Object b = vals[i];
-      if(!a.equals(b)) {
-        throw new Exception("List items not equals:"+a+" : "+b);
-      }
-    }
-
-    return true;
-  }
-
   private void assertTopicRun(TupleStream stream, String... idArray) throws Exception {
     long version = -1;
     int count = 0;