Posted to commits@lucene.apache.org by da...@apache.org on 2018/10/16 22:53:16 UTC

[01/50] [abbrv] lucene-solr:jira/http2: SOLR-12840: Add pairSort Stream Evaluator

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/http2 6558203fd -> 9a36e87f8


SOLR-12840: Add pairSort Stream Evaluator


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6a702ee1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6a702ee1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6a702ee1

Branch: refs/heads/jira/http2
Commit: 6a702ee16bf1b3bf2fda9509956c609b751b2c35
Parents: 49285e8
Author: Joel Bernstein <jb...@apache.org>
Authored: Mon Oct 8 12:53:23 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Mon Oct 8 12:53:23 2018 -0400

----------------------------------------------------------------------
 .../org/apache/solr/client/solrj/io/Lang.java   |  1 +
 .../client/solrj/io/eval/PairSortEvaluator.java | 93 ++++++++++++++++++++
 .../apache/solr/client/solrj/io/TestLang.java   |  2 +-
 .../solrj/io/stream/MathExpressionTest.java     | 32 +++++++
 4 files changed, 127 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
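For reference, pairSort sorts two parallel numeric arrays as (first, second) pairs ordered by the first array (ties broken by the second) and returns a two-row matrix. A minimal SolrJ sketch of calling it through the /stream handler, mirroring the new MathExpressionTest case below; the base URL and collection name are illustrative, not part of the patch:

    import java.io.IOException;
    import java.util.List;
    import org.apache.solr.client.solrj.io.Tuple;
    import org.apache.solr.client.solrj.io.stream.SolrStream;
    import org.apache.solr.client.solrj.io.stream.StreamContext;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class PairSortExample {
      public static void main(String[] args) throws IOException {
        // c is a 2-row matrix: row 0 = a sorted ascending, row 1 = b reordered with a.
        String expr = "let(a=array(4.5, 7.7, 2.1, 2.1, 6.3)," +
                      "    b=array(1, 2, 3, 4, 5)," +
                      "    c=pairSort(a, b))";
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("expr", expr);
        params.set("qt", "/stream");
        // Base URL and collection name are placeholders for a running SolrCloud node.
        SolrStream stream = new SolrStream("http://localhost:8983/solr/collection1", params);
        stream.setStreamContext(new StreamContext());
        try {
          stream.open();
          Tuple tuple = stream.read();                       // single tuple carrying "c"
          List<List<Number>> c = (List<List<Number>>) tuple.get("c");
          System.out.println(c.get(0));                      // [2.1, 2.1, 4.5, 6.3, 7.7]
          System.out.println(c.get(1));                      // [3, 4, 1, 5, 2]
        } finally {
          stream.close();
        }
      }
    }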


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a702ee1/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
index 28c4c66..75131ca 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
@@ -270,6 +270,7 @@ public class Lang {
         .withFunctionName("getCenter", GetCenterEvaluator.class)
         .withFunctionName("getRadius", GetRadiusEvaluator.class)
         .withFunctionName("getSupportPoints", GetSupportPointsEvaluator.class)
+        .withFunctionName("pairSort", PairSortEvaluator.class)
         // Boolean Stream Evaluators
 
         .withFunctionName("and", AndEvaluator.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a702ee1/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PairSortEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PairSortEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PairSortEvaluator.java
new file mode 100644
index 0000000..1774755
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PairSortEvaluator.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Collections;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class PairSortEvaluator extends RecursiveNumericEvaluator implements TwoValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  public PairSortEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+  }
+
+  @Override
+  public Object doWork(Object first, Object second) throws IOException{
+    if(null == first){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - null found for the first value",toExpression(constructingFactory)));
+    }
+    if(null == second){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - null found for the second value",toExpression(constructingFactory)));
+    }
+    if(!(first instanceof List<?>)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for the first value, expecting a list of numbers",toExpression(constructingFactory), first.getClass().getSimpleName()));
+    }
+    if(!(second instanceof List<?>)){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - found type %s for the second value, expecting a list of numbers",toExpression(constructingFactory), first.getClass().getSimpleName()));
+    }
+
+    List<Number> l1 = (List<Number>)first;
+    List<Number> l2 = (List<Number>)second;
+
+    if(l2.size() != l1.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - first list (%d) has a different size than the second list (%d)",toExpression(constructingFactory), l1.size(), l2.size()));
+    }
+
+    List<double[]> pairs = new ArrayList<>();
+    for(int idx = 0; idx < l1.size(); ++idx){
+      double[] pair = new double[2];
+      pair[0]= l1.get(idx).doubleValue();
+      pair[1] = l2.get(idx).doubleValue();
+      pairs.add(pair);
+    }
+
+    Collections.sort(pairs, new PairComp());
+    double[][] data = new double[2][pairs.size()];
+    for(int i=0; i<pairs.size(); i++) {
+      data[0][i] = pairs.get(i)[0];
+      data[1][i] = pairs.get(i)[1];
+    }
+
+    return new Matrix(data);
+  }
+
+  private class PairComp implements Comparator<double[]> {
+    public int compare(double[] a, double[] b) {
+      if(a[0] > b[0]) {
+        return 1;
+      } else if(a[0] < b[0]) {
+        return -1;
+      } else {
+        if(a[1] > b[1]) {
+          return 1;
+        } else if(a[1] < b[1]){
+          return -1;
+        } else {
+          return 0;
+        }
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a702ee1/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
index 3ae5547..85ddd93 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
@@ -73,7 +73,7 @@ public class TestLang extends LuceneTestCase {
       "outliers", "stream", "getCache", "putCache", "listCache", "removeCache", "zscores", "latlonVectors",
       "convexHull", "getVertices", "getBaryCenter", "getArea", "getBoundarySize","oscillate",
       "getAmplitude", "getPhase", "getAngularFrequency", "enclosingDisk", "getCenter", "getRadius",
-      "getSupportPoints"};
+      "getSupportPoints", "pairSort"};
 
   @Test
   public void testLang() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6a702ee1/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
index deb4522..a095dd8 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
@@ -1154,6 +1154,38 @@ public class MathExpressionTest extends SolrCloudTestCase {
     assertTrue(out.get(5).doubleValue() == 500.23D);
   }
 
+  @Test
+  public void testPairSort() throws Exception {
+    String cexpr = "let(a=array(4.5, 7.7, 2.1, 2.1, 6.3)," +
+        "               b=array(1, 2, 3, 4, 5)," +
+        "               c=pairSort(a, b))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("c");
+    System.out.println("###### out:"+out);
+    assertEquals(out.size(), 2);
+    List<Number> row1 = out.get(0);
+    assertEquals(row1.get(0).doubleValue(), 2.1, 0);
+    assertEquals(row1.get(1).doubleValue(), 2.1, 0);
+    assertEquals(row1.get(2).doubleValue(), 4.5, 0);
+    assertEquals(row1.get(3).doubleValue(), 6.3, 0);
+    assertEquals(row1.get(4).doubleValue(), 7.7, 0);
+
+    List<Number> row2 = out.get(1);
+    assertEquals(row2.get(0).doubleValue(), 3, 0);
+    assertEquals(row2.get(1).doubleValue(), 4, 0);
+    assertEquals(row2.get(2).doubleValue(), 1, 0);
+    assertEquals(row2.get(3).doubleValue(), 5, 0);
+    assertEquals(row2.get(4).doubleValue(), 2, 0);
+  }
+
 
   @Test
   public void testOnes() throws Exception {


[46/50] [abbrv] lucene-solr:jira/http2: SOLR-12876: upon failure report exception message in ShardParamsTest.testGetShardsTolerantAsBool

Posted by da...@apache.org.
SOLR-12876: upon failure report exception message in ShardParamsTest.testGetShardsTolerantAsBool


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7fa19d26
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7fa19d26
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7fa19d26

Branch: refs/heads/jira/http2
Commit: 7fa19d2676afc8a9545d903239c942044d7b2765
Parents: 761f8aa
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue Oct 16 10:52:11 2018 -0400
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue Oct 16 10:52:11 2018 -0400

----------------------------------------------------------------------
 .../src/test/org/apache/solr/common/params/ShardParamsTest.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7fa19d26/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java b/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
index 5694972..b17f217 100644
--- a/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
@@ -77,6 +77,6 @@ public class ShardParamsTest extends LuceneTestCase
     // values that aren't "requireZkConnected" or boolean should throw an exception
     params.set(ShardParams.SHARDS_TOLERANT, "bogusValue");
     Exception exception = expectThrows(SolrException.class, () -> ShardParams.getShardsTolerantAsBool(params));
-    assertTrue(exception.getMessage().startsWith("invalid boolean value: "));
+    assertTrue(exception.getMessage(), exception.getMessage().startsWith("invalid boolean value: "));
   }
 }


[21/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Release the policy session as soon as we're done with the computation.

Posted by da...@apache.org.
SOLR-12739: Release the policy session as soon as we're done with the computation.

This fixes the CollectionsAPIDistributedZkTest.testCoresAreDistributedAcrossNodes test failures. Because of the various checks for exceptional conditions, there were cases where the session was not released, leaving stale data in the policy session cache.
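
A minimal sketch of the release-in-finally idiom the patch applies in AddReplicaCmd below; the Session interface and method names here are illustrative stand-ins, not Solr APIs:

    import java.util.concurrent.atomic.AtomicReference;

    public class SessionReleaseIdiom {
      interface Session { void release(); }

      // The computation may lazily create a session and may throw; the finally
      // block guarantees the session is released on every path, so no stale
      // state survives an exceptional exit.
      static void computeAndRelease(AtomicReference<Session> sessionRef) {
        try {
          doComputation(sessionRef);
        } finally {
          Session session = sessionRef.get();
          if (session != null) {
            session.release();
          }
        }
      }

      static void doComputation(AtomicReference<Session> sessionRef) {
        // stand-in for buildReplicaPositions(...) in the patch below
      }
    }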


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/50d1c7b4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/50d1c7b4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/50d1c7b4

Branch: refs/heads/jira/http2
Commit: 50d1c7b4816baefe4b47fd59271001d5d590cd3f
Parents: 940a730
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Oct 10 17:12:50 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Oct 10 17:12:50 2018 +0530

----------------------------------------------------------------------
 .../cloud/api/collections/AddReplicaCmd.java     | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/50d1c7b4/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
index 6e851db..8b72cdf 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
@@ -141,10 +141,17 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
     }
 
     AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    List<CreateReplica> createReplicas = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, replicaTypesVsCount, sessionWrapper)
-        .stream()
-        .map(replicaPosition -> assignReplicaDetails(ocmh.cloudManager, clusterState, message, replicaPosition))
-        .collect(Collectors.toList());
+    List<CreateReplica> createReplicas;
+    try {
+      createReplicas = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, replicaTypesVsCount, sessionWrapper)
+          .stream()
+          .map(replicaPosition -> assignReplicaDetails(ocmh.cloudManager, clusterState, message, replicaPosition))
+          .collect(Collectors.toList());
+    } finally {
+      if (sessionWrapper.get() != null) {
+        sessionWrapper.get().release();
+      }
+    }
 
     ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
     ZkStateReader zkStateReader = ocmh.zkStateReader;
@@ -162,10 +169,6 @@ public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
       for (CreateReplica replica : createReplicas) {
         ocmh.waitForCoreNodeName(collectionName, replica.node, replica.coreName);
       }
-
-      if (sessionWrapper.get() != null) {
-        sessionWrapper.get().release();
-      }
       if (onComplete != null) onComplete.run();
     };
 


[19/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Fix failures in AutoAddReplicasIntegrationTest and its sub-class.

Posted by da...@apache.org.
SOLR-12739: Fix failures in AutoAddReplicasIntegrationTest and its sub-class.

This test too makes assumptions about how replicas are placed. With the legacy assignment strategy, the replicas of a given collection are spread equally across all nodes, but with the new policy-based strategy, all cores across collections are spread out. The assumptions in this test were therefore wrong. The test now uses the legacy assignment policy, because testing the autoAddReplicas feature does not have to depend on the new replica assignment strategies. This change also fixes a bug in Assign, which used the "collection" key instead of "cluster" to figure out which strategy to use.
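
A sketch of opting a cluster into legacy replica assignment via the cluster-properties defaults, mirroring the V2 request added to the test below; the SolrClient is assumed to point at a running SolrCloud cluster, and the payload uses the new "cluster" key:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.request.V2Request;

    public class LegacyAssignmentSetup {
      // Sets defaults/cluster/useLegacyReplicaAssignment=true as a cluster property.
      static void enableLegacyReplicaAssignment(SolrClient client) throws Exception {
        new V2Request.Builder("/cluster")
            .withMethod(SolrRequest.METHOD.POST)
            .withPayload("{set-obj-property:{defaults:{cluster:{useLegacyReplicaAssignment:true}}}}")
            .build()
            .process(client);
      }
    }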


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9f34a7c7
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9f34a7c7
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9f34a7c7

Branch: refs/heads/jira/http2
Commit: 9f34a7c776c0977c9e901fa3ca42b54309aebe8f
Parents: a66a7f3
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Oct 10 15:38:52 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Oct 10 15:38:52 2018 +0530

----------------------------------------------------------------------
 .../org/apache/solr/cloud/api/collections/Assign.java     |  2 +-
 .../org/apache/solr/cloud/api/collections/AssignTest.java |  4 ++--
 .../cloud/autoscaling/AutoAddReplicasIntegrationTest.java | 10 +++++++++-
 .../apache/solr/common/params/CollectionAdminParams.java  |  8 +++++++-
 4 files changed, 19 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f34a7c7/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index 841ee93..fd09a3f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -269,7 +269,7 @@ public class Assign {
     Map<String, Object> clusterProperties = cloudManager.getClusterStateProvider().getClusterProperties();
     if (clusterProperties.containsKey(CollectionAdminParams.DEFAULTS))  {
       Map<String, Object> defaults = (Map<String, Object>) clusterProperties.get(CollectionAdminParams.DEFAULTS);
-      Map<String, Object> collectionDefaults = (Map<String, Object>) defaults.getOrDefault(CollectionAdminParams.COLLECTION, Collections.emptyMap());
+      Map<String, Object> collectionDefaults = (Map<String, Object>) defaults.getOrDefault(CollectionAdminParams.CLUSTER, Collections.emptyMap());
       useLegacyAssignment = (boolean) collectionDefaults.getOrDefault(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, false);
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f34a7c7/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
index 91e37b2..d5197ca 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
@@ -169,13 +169,13 @@ public class AssignTest extends SolrTestCaseJ4 {
     ClusterStateProvider clusterStateProvider = mock(ClusterStateProvider.class);
     when(solrCloudManager.getClusterStateProvider()).thenReturn(clusterStateProvider);
     // first we set useLegacyReplicaAssignment=false, so autoscaling should always be used
-    when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("collection", Utils.makeMap("useLegacyReplicaAssignment", false))));
+    when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("cluster", Utils.makeMap("useLegacyReplicaAssignment", false))));
     // verify
     boolean usePolicyFramework = Assign.usePolicyFramework(solrCloudManager);
     assertTrue(usePolicyFramework);
 
     // now we set useLegacyReplicaAssignment=true, so autoscaling can only be used if an explicit policy or preference exists
-    when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("collection", Utils.makeMap("useLegacyReplicaAssignment", true))));
+    when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("cluster", Utils.makeMap("useLegacyReplicaAssignment", true))));
     DistribStateManager distribStateManager = mock(DistribStateManager.class);
     when(solrCloudManager.getDistribStateManager()).thenReturn(distribStateManager);
     when(distribStateManager.getAutoScalingConfig()).thenReturn(new AutoScalingConfig(Collections.emptyMap()));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f34a7c7/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasIntegrationTest.java
index b1ac6ee..3c40d8b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasIntegrationTest.java
@@ -23,10 +23,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.V2Request;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.cloud.ClusterStateUtil;
 import org.apache.solr.common.cloud.DocCollection;
@@ -44,7 +46,7 @@ import org.junit.Test;
 
 import static org.apache.solr.common.util.Utils.makeMap;
 
-@LogLevel("org.apache.solr.cloud.autoscaling=DEBUG;org.apache.solr.client.solrj.cloud.autoscaling=DEBUG")
+@LogLevel("org.apache.solr.cloud.autoscaling=DEBUG;org.apache.solr.client.solrj.cloud.autoscaling=DEBUG;org.apache.solr.cloud=DEBUG;org.apache.solr.cloud.Overseer=DEBUG;org.apache.solr.cloud.overseer=DEBUG;")
 public class AutoAddReplicasIntegrationTest extends SolrCloudTestCase {
   private static final String COLLECTION1 =  "testSimple1";
   private static final String COLLECTION2 =  "testSimple2";
@@ -55,6 +57,12 @@ public class AutoAddReplicasIntegrationTest extends SolrCloudTestCase {
         .addConfig("conf", configset("cloud-minimal"))
         .withSolrXml(TEST_PATH().resolve("solr.xml"))
         .configure();
+
+    new V2Request.Builder("/cluster")
+        .withMethod(SolrRequest.METHOD.POST)
+        .withPayload("{set-obj-property:{defaults : {cluster: {useLegacyReplicaAssignment:true}}}}}")
+        .build()
+        .process(cluster.getSolrClient());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f34a7c7/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
index c34f930..cf0faa8 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
@@ -94,11 +94,17 @@ public interface CollectionAdminParams {
   /**
    * Used by cluster properties API as a wrapper key to provide defaults for collection, cluster etc.
    *
-   * e.g. {defaults:{collection:{useLegacyReplicaAssignment:false}}}
+   * e.g. {defaults:{collection:{replicationFactor:2}}}
    */
   String DEFAULTS = "defaults";
 
   /**
+   * Cluster wide defaults can be nested under this key e.g.
+   * {defaults: {cluster:{useLegacyReplicaAssignment:false}}}
+   */
+  String CLUSTER = "cluster";
+
+  /**
    * This cluster property decides whether Solr should use the legacy round-robin replica placement strategy
    * or the autoscaling policy based strategy to assign replicas to nodes. The default is false.
    */


[15/50] [abbrv] lucene-solr:jira/http2: SOLR-11812: fix precommit

Posted by da...@apache.org.
SOLR-11812: fix precommit


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/41e3d073
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/41e3d073
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/41e3d073

Branch: refs/heads/jira/http2
Commit: 41e3d0736902ed26f9b597daba9eaa8f885fc52e
Parents: 50478ea
Author: Steve Rowe <sa...@apache.org>
Authored: Tue Oct 9 17:34:16 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Tue Oct 9 17:34:16 2018 -0400

----------------------------------------------------------------------
 solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java | 2 --
 solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java | 4 ----
 2 files changed, 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/41e3d073/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index 997845f..5d10824 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -21,7 +21,6 @@ import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.EnumSet;
-import java.util.List;
 import java.util.Set;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
@@ -46,7 +45,6 @@ import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.ZkContainer;
 import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/41e3d073/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 70d23aa..7f77d57 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -22,7 +22,6 @@ import static org.apache.solr.common.cloud.Replica.State.RECOVERING;
 import java.io.File;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -31,7 +30,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.lucene.util.LuceneTestCase;
@@ -51,7 +49,6 @@ import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
@@ -59,7 +56,6 @@ import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.MockCoreContainer.MockCoreDescriptor;
 import org.apache.solr.util.RTimer;
 import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;


[18/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Fix CloudSolrClientTest.testNonRetryableRequests failures.

Posted by da...@apache.org.
SOLR-12739: Fix CloudSolrClientTest.testNonRetryableRequests failures.

The testNonRetryableRequests test assumes that a collection's replicas are distributed equally among all nodes, but that does not hold with the policy engine. Instead, the policy engine spreads the cores belonging to all collections equally among all nodes. This is fixed by creating the collections needed by the tests in this class just in time.
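
A sketch of the just-in-time pattern the patch adopts below; the class, collection, and configset names are illustrative, and the timeout value is a placeholder:

    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.cloud.AbstractDistribZkTestBase;
    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.junit.Before;
    import org.junit.Test;

    public class PerTestCollectionExample extends SolrCloudTestCase {
      private static final String COLLECTION = "collection1";

      @Before
      public void setUp() throws Exception {
        super.setUp();
        cluster.deleteAllCollections();   // start every test from a clean slate
      }

      @Test
      public void testSomething() throws Exception {
        // create only the collection this test needs, when it needs it
        CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
            .process(cluster.getSolrClient());
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION,
            cluster.getSolrClient().getZkStateReader(), false, true, 30 /* placeholder timeout */);
        // ... exercise the SolrClient against COLLECTION ...
      }
    }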


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a66a7f31
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a66a7f31
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a66a7f31

Branch: refs/heads/jira/http2
Commit: a66a7f31970fad8b55888b4b538d0dbc2ae3bcc5
Parents: 3629e76
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Oct 10 13:12:26 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Oct 10 13:12:26 2018 +0530

----------------------------------------------------------------------
 .../client/solrj/impl/CloudSolrClientTest.java  | 35 ++++++++++++++------
 1 file changed, 24 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a66a7f31/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index 8570e8e..0e4c6c2 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -102,14 +102,16 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
         .addConfig("conf", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
         .configure();
 
-    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
-        false, true, TIMEOUT);
-    
     final List<String> solrUrls = new ArrayList<>();
     solrUrls.add(cluster.getJettySolrRunner(0).getBaseUrl().toString());
     httpBasedCloudSolrClient = new CloudSolrClient.Builder(solrUrls).build();
   }
+  
+  @Before
+  public void setUp() throws Exception  {
+    super.setUp();
+    cluster.deleteAllCollections();
+  }
 
   @AfterClass
   public static void afterClass() {
@@ -123,13 +125,6 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
   }
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  @Before
-  public void cleanIndex() throws Exception {
-    new UpdateRequest()
-        .deleteByQuery("*:*")
-        .commit(cluster.getSolrClient(), COLLECTION);
-  }
-
   /**
    * Randomly return the cluster's ZK based CSC, or HttpClusterProvider based CSC.
    */
@@ -139,6 +134,9 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
   @Test
   public void testParallelUpdateQTime() throws Exception {
+    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
     UpdateRequest req = new UpdateRequest();
     for (int i=0; i<10; i++)  {
       SolrInputDocument doc = new SolrInputDocument();
@@ -177,6 +175,9 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
   @Test
   public void testAliasHandling() throws Exception {
+    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
 
     CollectionAdminRequest.createCollection(COLLECTION2, "conf", 2, 1).process(cluster.getSolrClient());
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION2, cluster.getSolrClient().getZkStateReader(),
@@ -224,6 +225,9 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
   @Test
   public void testRouting() throws Exception {
+    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
     
     AbstractUpdateRequest request = new UpdateRequest()
         .add(id, "0", "a_t", "hello1")
@@ -603,6 +607,9 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
   @Test
   public void stateVersionParamTest() throws Exception {
+    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
 
     DocCollection coll = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
     Replica r = coll.getSlices().iterator().next().getReplicas().iterator().next();
@@ -705,6 +712,9 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
   @Test
   public void testVersionsAreReturned() throws Exception {
+    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
     
     // assert that "adds" are returned
     UpdateRequest updateRequest = new UpdateRequest()
@@ -751,6 +761,9 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
   
   @Test
   public void testInitializationWithSolrUrls() throws Exception {
+    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(),
+        false, true, TIMEOUT);
     CloudSolrClient client = httpBasedCloudSolrClient;
     SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc");
     client.add(COLLECTION, doc);


[03/50] [abbrv] lucene-solr:jira/http2: LUCENE-8496: Selective indexing - modify BKDReader/BKDWriter to allow users to select fewer dimensions for creating the index than the total number of dimensions used for field encoding. i.e.

Posted by da...@apache.org.
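
A short sketch of the selective-indexing API this change introduces, based on the FieldType.setDimensions usage in the test changes below; the field name, dimension counts, and bytes-per-dimension are illustrative:

    import org.apache.lucene.document.BinaryPoint;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldType;

    public class SelectiveIndexingSketch {
      // Encode 4 data dimensions per point but build the BKD index on only the
      // first 2; the packed value must supply all 4 data dimensions.
      static Document pointDoc(byte[] packedValue) {
        FieldType type = new FieldType();
        type.setDimensions(/* dataDimensionCount */ 4, /* indexDimensionCount */ 2, /* bytesPerDim */ 4);
        type.freeze();
        Document doc = new Document();
        doc.add(new BinaryPoint("field", packedValue, type)); // packedValue length must be 4 * 4 bytes
        return doc;
      }
    }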
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
index 4bd806e..aa12de7 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
@@ -1048,7 +1048,7 @@ public class AssertingLeafReader extends FilterLeafReader {
 
     @Override
     public void intersect(IntersectVisitor visitor) throws IOException {
-      in.intersect(new AssertingIntersectVisitor(in.getNumDimensions(), in.getBytesPerDimension(), visitor));
+      in.intersect(new AssertingIntersectVisitor(in.getNumDataDimensions(), in.getNumIndexDimensions(), in.getBytesPerDimension(), visitor));
     }
 
     @Override
@@ -1069,8 +1069,13 @@ public class AssertingLeafReader extends FilterLeafReader {
     }
 
     @Override
-    public int getNumDimensions() throws IOException {
-      return in.getNumDimensions();
+    public int getNumDataDimensions() throws IOException {
+      return in.getNumDataDimensions();
+    }
+
+    @Override
+    public int getNumIndexDimensions() throws IOException {
+      return in.getNumIndexDimensions();
     }
 
     @Override
@@ -1093,7 +1098,8 @@ public class AssertingLeafReader extends FilterLeafReader {
   /** Validates in the 1D case that all points are visited in order, and point values are in bounds of the last cell checked */
   static class AssertingIntersectVisitor implements IntersectVisitor {
     final IntersectVisitor in;
-    final int numDims;
+    final int numDataDims;
+    final int numIndexDims;
     final int bytesPerDim;
     final byte[] lastDocValue;
     final byte[] lastMinPackedValue;
@@ -1102,13 +1108,14 @@ public class AssertingLeafReader extends FilterLeafReader {
     private int lastDocID = -1;
     private int docBudget;
 
-    AssertingIntersectVisitor(int numDims, int bytesPerDim, IntersectVisitor in) {
+    AssertingIntersectVisitor(int numDataDims, int numIndexDims, int bytesPerDim, IntersectVisitor in) {
       this.in = in;
-      this.numDims = numDims;
+      this.numDataDims = numDataDims;
+      this.numIndexDims = numIndexDims;
       this.bytesPerDim = bytesPerDim;
-      lastMaxPackedValue = new byte[numDims*bytesPerDim];
-      lastMinPackedValue = new byte[numDims*bytesPerDim];
-      if (numDims == 1) {
+      lastMaxPackedValue = new byte[numDataDims*bytesPerDim];
+      lastMinPackedValue = new byte[numDataDims*bytesPerDim];
+      if (numDataDims == 1) {
         lastDocValue = new byte[bytesPerDim];
       } else {
         lastDocValue = null;
@@ -1132,14 +1139,14 @@ public class AssertingLeafReader extends FilterLeafReader {
       assert lastCompareResult == PointValues.Relation.CELL_CROSSES_QUERY;
 
       // This doc's packed value should be contained in the last cell passed to compare:
-      for(int dim=0;dim<numDims;dim++) {
-        assert FutureArrays.compareUnsigned(lastMinPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim, packedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim) <= 0: "dim=" + dim + " of " +  numDims + " value=" + new BytesRef(packedValue);
-        assert FutureArrays.compareUnsigned(lastMaxPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim, packedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim) >= 0: "dim=" + dim + " of " +  numDims + " value=" + new BytesRef(packedValue);
+      for(int dim=0;dim<numIndexDims;dim++) {
+        assert FutureArrays.compareUnsigned(lastMinPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim, packedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim) <= 0: "dim=" + dim + " of " +  numDataDims + " value=" + new BytesRef(packedValue);
+        assert FutureArrays.compareUnsigned(lastMaxPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim, packedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim) >= 0: "dim=" + dim + " of " +  numDataDims + " value=" + new BytesRef(packedValue);
       }
 
       // TODO: we should assert that this "matches" whatever relation the last call to compare had returned
-      assert packedValue.length == numDims * bytesPerDim;
-      if (numDims == 1) {
+      assert packedValue.length == numDataDims * bytesPerDim;
+      if (numDataDims == 1) {
         int cmp = FutureArrays.compareUnsigned(lastDocValue, 0, bytesPerDim, packedValue, 0, bytesPerDim);
         if (cmp < 0) {
           // ok
@@ -1163,11 +1170,11 @@ public class AssertingLeafReader extends FilterLeafReader {
 
     @Override
     public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         assert FutureArrays.compareUnsigned(minPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim, maxPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim) <= 0;
       }
-      System.arraycopy(maxPackedValue, 0, lastMaxPackedValue, 0, numDims*bytesPerDim);
-      System.arraycopy(minPackedValue, 0, lastMinPackedValue, 0, numDims*bytesPerDim);
+      System.arraycopy(maxPackedValue, 0, lastMaxPackedValue, 0, numIndexDims*bytesPerDim);
+      System.arraycopy(minPackedValue, 0, lastMinPackedValue, 0, numIndexDims*bytesPerDim);
       lastCompareResult = in.compare(minPackedValue, maxPackedValue);
       return lastCompareResult;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
index 03f01e7..05ed0ff 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
@@ -325,7 +325,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
     FieldInfo proto = oneDocReader.getFieldInfos().fieldInfo("field");
     FieldInfo field = new FieldInfo(proto.name, proto.number, proto.hasVectors(), proto.omitsNorms(), proto.hasPayloads(), 
                                     proto.getIndexOptions(), proto.getDocValuesType(), proto.getDocValuesGen(), new HashMap<>(),
-                                    proto.getPointDimensionCount(), proto.getPointNumBytes(), proto.isSoftDeletesField());
+                                    proto.getPointDataDimensionCount(), proto.getPointIndexDimensionCount(), proto.getPointNumBytes(), proto.isSoftDeletesField());
 
     FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { field } );
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
index b2e3e4e..71a9f26 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
@@ -28,6 +28,7 @@ import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.BinaryPoint;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StringField;
@@ -520,13 +521,14 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
   private void doTestRandomBinary(int count) throws Exception {
     int numDocs = TestUtil.nextInt(random(), count, count*2);
     int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
-    int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
+    int numDataDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
 
     byte[][][] docValues = new byte[numDocs][][];
 
     for(int docID=0;docID<numDocs;docID++) {
-      byte[][] values = new byte[numDims][];
-      for(int dim=0;dim<numDims;dim++) {
+      byte[][] values = new byte[numDataDims][];
+      for(int dim=0;dim<numDataDims;dim++) {
         values[dim] = new byte[numBytesPerDim];
         // TODO: sometimes test on a "small" volume too, so we test the high density cases, higher chance of boundary, etc. cases:
         random().nextBytes(values[dim]);
@@ -534,17 +536,22 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
       docValues[docID] = values;
     }
 
-    verify(docValues, null, numDims, numBytesPerDim);
+    verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim);
   }
 
-  /** docIDs can be null, for the single valued case, else it maps value to docID, but all values for one doc must be adjacent */
   private void verify(byte[][][] docValues, int[] docIDs, int numDims, int numBytesPerDim) throws Exception {
+    verify(docValues, docIDs, numDims, numDims, numBytesPerDim);
+  }
+
+  /** docIDs can be null, for the single valued case, else it maps value to docID, but all values for one doc must be adjacent */
+  private void verify(byte[][][] docValues, int[] docIDs, int numDataDims, int numIndexDims, int numBytesPerDim) throws Exception {
     try (Directory dir = getDirectory(docValues.length)) {
       while (true) {
         try {
-          verify(dir, docValues, docIDs, numDims, numBytesPerDim, false);
+          verify(dir, docValues, docIDs, numDataDims, numIndexDims, numBytesPerDim, false);
           return;
         } catch (IllegalArgumentException iae) {
+          iae.printStackTrace();
           // This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry
           assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
         }
@@ -553,9 +560,22 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
   }
 
   private void verify(Directory dir, byte[][][] docValues, int[] ids, int numDims, int numBytesPerDim, boolean expectExceptions) throws Exception {
+    verify(dir, docValues, ids, numDims, numDims, numBytesPerDim, expectExceptions);
+  }
+
+  private byte[] flattenBinaryPoint(byte[][] value, int numDataDims, int numBytesPerDim) {
+    byte[] result = new byte[value.length * numBytesPerDim];
+    for (int d = 0; d < numDataDims; ++d) {
+      System.arraycopy(value[d], 0, result, d * numBytesPerDim, numBytesPerDim);
+    }
+    return result;
+  }
+
+  /** test selective indexing */
+  private void verify(Directory dir, byte[][][] docValues, int[] ids, int numDataDims, int numIndexDims, int numBytesPerDim, boolean expectExceptions) throws Exception {
     int numValues = docValues.length;
     if (VERBOSE) {
-      System.out.println("TEST: numValues=" + numValues + " numDims=" + numDims + " numBytesPerDim=" + numBytesPerDim);
+      System.out.println("TEST: numValues=" + numValues + " numDataDims=" + numDataDims + " numIndexDims=" + numIndexDims + " numBytesPerDim=" + numBytesPerDim);
     }
 
     // RandomIndexWriter is too slow:
@@ -578,10 +598,10 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
     DirectoryReader r = null;
 
     // Compute actual min/max values:
-    byte[][] expectedMinValues = new byte[numDims][];
-    byte[][] expectedMaxValues = new byte[numDims][];
+    byte[][] expectedMinValues = new byte[numDataDims][];
+    byte[][] expectedMaxValues = new byte[numDataDims][];
     for(int ord=0;ord<docValues.length;ord++) {
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         if (ord == 0) {
           expectedMinValues[dim] = new byte[numBytesPerDim];
           System.arraycopy(docValues[ord][dim], 0, expectedMinValues[dim], 0, numBytesPerDim);
@@ -629,6 +649,10 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
 
     try {
 
+      FieldType fieldType = new FieldType();
+      fieldType.setDimensions(numDataDims, numIndexDims, numBytesPerDim);
+      fieldType.freeze();
+
       Document doc = null;
       int lastID = -1;
       for(int ord=0;ord<numValues;ord++) {
@@ -649,7 +673,10 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
           doc = new Document();
           doc.add(new NumericDocValuesField("id", id));
         }
-        doc.add(new BinaryPoint("field", docValues[ord]));
+        // pack the binary point
+        byte[] val = flattenBinaryPoint(docValues[ord], numDataDims, numBytesPerDim);
+
+        doc.add(new BinaryPoint("field", val, fieldType));
         lastID = id;
 
         if (random().nextInt(30) == 17) {
@@ -667,7 +694,8 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
         if (random().nextInt(30) == 17) {
           // randomly index some documents with this field, but we will delete them:
           Document xdoc = new Document();
-          xdoc.add(new BinaryPoint("field", docValues[ord]));
+          val = flattenBinaryPoint(docValues[ord], numDataDims, numBytesPerDim);
+          xdoc.add(new BinaryPoint("field", val, fieldType));
           xdoc.add(new StringField("nukeme", "yes", Field.Store.NO));
           if (useRealWriter) {
             w.w.addDocument(xdoc);
@@ -689,7 +717,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
 
         if (VERBOSE) {
           System.out.println("  ord=" + ord + " id=" + id);
-          for(int dim=0;dim<numDims;dim++) {
+          for(int dim=0;dim<numDataDims;dim++) {
             System.out.println("    dim=" + dim + " value=" + new BytesRef(docValues[ord][dim]));
           }
         }
@@ -731,10 +759,10 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
       Bits liveDocs = MultiFields.getLiveDocs(r);
 
       // Verify min/max values are correct:
-      byte[] minValues = new byte[numDims*numBytesPerDim];
+      byte[] minValues = new byte[numIndexDims*numBytesPerDim];
       Arrays.fill(minValues, (byte) 0xff);
 
-      byte[] maxValues = new byte[numDims*numBytesPerDim];
+      byte[] maxValues = new byte[numIndexDims*numBytesPerDim];
 
       for(LeafReaderContext ctx : r.leaves()) {
         PointValues dimValues = ctx.reader().getPointValues("field");
@@ -744,7 +772,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
 
         byte[] leafMinValues = dimValues.getMinPackedValue();
         byte[] leafMaxValues = dimValues.getMaxPackedValue();
-        for(int dim=0;dim<numDims;dim++) {
+        for(int dim=0;dim<numIndexDims;dim++) {
           if (FutureArrays.compareUnsigned(leafMinValues, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, minValues, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim) < 0) {
             System.arraycopy(leafMinValues, dim*numBytesPerDim, minValues, dim*numBytesPerDim, numBytesPerDim);
           }
@@ -755,7 +783,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
       }
 
       byte[] scratch = new byte[numBytesPerDim];
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         System.arraycopy(minValues, dim*numBytesPerDim, scratch, 0, numBytesPerDim);
         //System.out.println("dim=" + dim + " expectedMin=" + new BytesRef(expectedMinValues[dim]) + " min=" + new BytesRef(scratch));
         assertTrue(Arrays.equals(expectedMinValues[dim], scratch));
@@ -771,9 +799,9 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
         }
 
         // Random N dims rect query:
-        byte[][] queryMin = new byte[numDims][];
-        byte[][] queryMax = new byte[numDims][];    
-        for(int dim=0;dim<numDims;dim++) {    
+        byte[][] queryMin = new byte[numIndexDims][];
+        byte[][] queryMax = new byte[numIndexDims][];
+        for(int dim=0;dim<numIndexDims;dim++) {
           queryMin[dim] = new byte[numBytesPerDim];
           random().nextBytes(queryMin[dim]);
           queryMax[dim] = new byte[numBytesPerDim];
@@ -786,7 +814,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
         }
 
         if (VERBOSE) {
-          for(int dim=0;dim<numDims;dim++) {
+          for(int dim=0;dim<numIndexDims;dim++) {
             System.out.println("  dim=" + dim + "\n    queryMin=" + new BytesRef(queryMin[dim]) + "\n    queryMax=" + new BytesRef(queryMax[dim]));
           }
         }
@@ -816,7 +844,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
                   return;
                 }
 
-                for(int dim=0;dim<numDims;dim++) {
+                for(int dim=0;dim<numIndexDims;dim++) {
                   //System.out.println("  dim=" + dim + " value=" + new BytesRef(packedValue, dim*numBytesPerDim, numBytesPerDim));
                   if (FutureArrays.compareUnsigned(packedValue, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 ||
                       FutureArrays.compareUnsigned(packedValue, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMax[dim], 0, numBytesPerDim) > 0) {
@@ -833,7 +861,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
               public Relation compare(byte[] minPacked, byte[] maxPacked) {
                 boolean crosses = false;
                 //System.out.println("compare");
-                for(int dim=0;dim<numDims;dim++) {
+                for(int dim=0;dim<numIndexDims;dim++) {
                   if (FutureArrays.compareUnsigned(maxPacked, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 ||
                       FutureArrays.compareUnsigned(minPacked, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMax[dim], 0, numBytesPerDim) > 0) {
                     //System.out.println("  query_outside_cell");
@@ -858,7 +886,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
         BitSet expected = new BitSet();
         for(int ord=0;ord<numValues;ord++) {
           boolean matches = true;
-          for(int dim=0;dim<numDims;dim++) {
+          for(int dim=0;dim<numIndexDims;dim++) {
             byte[] x = docValues[ord][dim];
             if (FutureArrays.compareUnsigned(x, 0, numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 ||
                 FutureArrays.compareUnsigned(x, 0, numBytesPerDim, queryMax[dim], 0, numBytesPerDim) > 0) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java
index 2c74677..691f19e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/MismatchedLeafReader.java
@@ -76,7 +76,8 @@ public class MismatchedLeafReader extends FilterLeafReader {
                                         oldInfo.getDocValuesType(),  // docValuesType
                                         oldInfo.getDocValuesGen(),   // dvGen
                                         oldInfo.attributes(),        // attributes
-                                        oldInfo.getPointDimensionCount(),      // dimension count
+                                        oldInfo.getPointDataDimensionCount(),      // data dimension count
+                                        oldInfo.getPointIndexDimensionCount(),      // index dimension count
                                         oldInfo.getPointNumBytes(),  // dimension numBytes
                                         oldInfo.isSoftDeletesField()); // used as soft-deletes field
       shuffled.set(i, newInfo);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
index 3588a8e..b5a728b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
@@ -111,7 +111,8 @@ public class RandomCodec extends AssertingCodec {
             try (BKDWriter writer = new RandomlySplittingBKDWriter(writeState.segmentInfo.maxDoc(),
                                                                    writeState.directory,
                                                                    writeState.segmentInfo.name,
-                                                                   fieldInfo.getPointDimensionCount(),
+                                                                   fieldInfo.getPointDataDimensionCount(),
+                                                                   fieldInfo.getPointIndexDimensionCount(),
                                                                    fieldInfo.getPointNumBytes(),
                                                                    maxPointsInLeafNode,
                                                                    maxMBSortInHeap,
@@ -259,10 +260,10 @@ public class RandomCodec extends AssertingCodec {
 
     final Random random;
 
-    public RandomlySplittingBKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDims,
+    public RandomlySplittingBKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDataDims, int numIndexDims,
                                       int bytesPerDim, int maxPointsInLeafNode, double maxMBSortInHeap,
                                       long totalPointCount, boolean singleValuePerDoc, int randomSeed) throws IOException {
-      super(maxDoc, tempDir, tempFileNamePrefix, numDims, bytesPerDim, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount,
+      super(maxDoc, tempDir, tempFileNamePrefix, numDataDims, numIndexDims, bytesPerDim, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount,
             getRandomSingleValuePerDoc(singleValuePerDoc, randomSeed),
             getRandomLongOrds(totalPointCount, singleValuePerDoc, randomSeed),
             getRandomOfflineSorterBufferMB(randomSeed),
@@ -291,7 +292,7 @@ public class RandomCodec extends AssertingCodec {
     @Override
     protected int split(byte[] minPackedValue, byte[] maxPackedValue, int[] parentDims) {
       // BKD normally defaults by the widest dimension, to try to make as squarish cells as possible, but we just pick a random one ;)
-      return random.nextInt(numDims);
+      return random.nextInt(numIndexDims);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
index 9f2d9b7..6da2761 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
@@ -130,7 +130,7 @@ public class RandomPostingsTester {
       fieldInfoArray[fieldUpto] = new FieldInfo(field, fieldUpto, false, false, true,
                                                 IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS,
                                                 DocValuesType.NONE, -1, new HashMap<>(),
-                                                0, 0, false);
+                                                0, 0, 0, false);
       fieldUpto++;
 
       SortedMap<BytesRef,SeedAndOrd> postings = new TreeMap<>();
@@ -651,7 +651,7 @@ public class RandomPostingsTester {
                                                    DocValuesType.NONE,
                                                    -1,
                                                    new HashMap<>(),
-                                                   0, 0, false);
+                                                   0, 0, 0, false);
     }
 
     FieldInfos newFieldInfos = new FieldInfos(newFieldInfoArray);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index 68e7190..95150fb 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -2627,10 +2627,12 @@ public abstract class LuceneTestCase extends Assert {
     FieldInfos fieldInfos1 = MultiFields.getMergedFieldInfos(leftReader);
     FieldInfos fieldInfos2 = MultiFields.getMergedFieldInfos(rightReader);
     for(FieldInfo fieldInfo1 : fieldInfos1) {
-      if (fieldInfo1.getPointDimensionCount() != 0) {
+      if (fieldInfo1.getPointDataDimensionCount() != 0) {
         FieldInfo fieldInfo2 = fieldInfos2.fieldInfo(fieldInfo1.name);
-        // same dimension count?
-        assertEquals(info, fieldInfo2.getPointDimensionCount(), fieldInfo2.getPointDimensionCount());
+        // same data dimension count?
+        assertEquals(info, fieldInfo2.getPointDataDimensionCount(), fieldInfo2.getPointDataDimensionCount());
+        // same index dimension count?
+        assertEquals(info, fieldInfo2.getPointIndexDimensionCount(), fieldInfo2.getPointIndexDimensionCount());
         // same bytes per dimension?
         assertEquals(info, fieldInfo2.getPointNumBytes(), fieldInfo2.getPointNumBytes());
 
@@ -2642,10 +2644,12 @@ public abstract class LuceneTestCase extends Assert {
 
     // make sure FieldInfos2 doesn't have any point fields that FieldInfo1 didn't have
     for(FieldInfo fieldInfo2 : fieldInfos2) {
-      if (fieldInfo2.getPointDimensionCount() != 0) {
+      if (fieldInfo2.getPointDataDimensionCount() != 0) {
         FieldInfo fieldInfo1 = fieldInfos1.fieldInfo(fieldInfo2.name);
-        // same dimension count?
-        assertEquals(info, fieldInfo2.getPointDimensionCount(), fieldInfo1.getPointDimensionCount());
+        // same data dimension count?
+        assertEquals(info, fieldInfo2.getPointDataDimensionCount(), fieldInfo1.getPointDataDimensionCount());
+        // same index dimension count?
+        assertEquals(info, fieldInfo2.getPointIndexDimensionCount(), fieldInfo1.getPointIndexDimensionCount());
         // same bytes per dimension?
         assertEquals(info, fieldInfo2.getPointNumBytes(), fieldInfo1.getPointNumBytes());
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index 65329b5..1aa0300 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -1082,7 +1082,7 @@ public final class TestUtil {
       final Field field1 = (Field) f;
       final Field field2;
       final DocValuesType dvType = field1.fieldType().docValuesType();
-      final int dimCount = field1.fieldType().pointDimensionCount();
+      final int dimCount = field1.fieldType().pointDataDimensionCount();
       if (dvType != DocValuesType.NONE) {
         switch(dvType) {
           case NUMERIC:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index e79161d..2cbe703 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -795,7 +795,8 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
               DocValuesType.NONE,
               fieldInfo.getDocValuesGen(),
               fieldInfo.attributes(),
-              fieldInfo.getPointDimensionCount(),
+              fieldInfo.getPointDataDimensionCount(),
+              fieldInfo.getPointIndexDimensionCount(),
               fieldInfo.getPointNumBytes(),
               fieldInfo.isSoftDeletesField());
           newInfos.add(f);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java b/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java
index 84f90ef..b2f239e 100644
--- a/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java
+++ b/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java
@@ -173,7 +173,7 @@ public class BBoxStrategy extends SpatialStrategy {
     if ((this.hasDocVals = fieldType.docValuesType() != DocValuesType.NONE)) {
       numQuads++;
     }
-    if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
+    if ((this.hasPointVals = fieldType.pointDataDimensionCount() > 0)) {
       numQuads++;
     }
     if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java b/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java
index da48fcb..4df76ca 100644
--- a/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java
+++ b/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java
@@ -152,7 +152,7 @@ public class PointVectorStrategy extends SpatialStrategy {
     if ((this.hasDocVals = fieldType.docValuesType() != DocValuesType.NONE)) {
       numPairs++;
     }
-    if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
+    if ((this.hasPointVals = fieldType.pointDataDimensionCount() > 0)) {
       numPairs++;
     }
     if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/schema/SchemaField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaField.java b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
index 256cbae..e28629e 100644
--- a/solr/core/src/java/org/apache/solr/schema/SchemaField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
@@ -415,7 +415,12 @@ public final class SchemaField extends FieldProperties implements IndexableField
   }
 
   @Override
-  public int pointDimensionCount() {
+  public int pointDataDimensionCount() {
+    return 0;
+  }
+
+  @Override
+  public int pointIndexDimensionCount() {
     return 0;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
index 8875afc..57d3d43 100644
--- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
@@ -431,7 +431,7 @@ public class CollapsingQParserPlugin extends QParserPlugin {
                                       DocValuesType.NONE,
                                       fieldInfo.getDocValuesGen(),
                                       fieldInfo.attributes(),
-                                      0, 0, fieldInfo.isSoftDeletesField());
+                                      0, 0, 0, fieldInfo.isSoftDeletesField());
           newInfos.add(f);
 
         } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/search/Insanity.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/Insanity.java b/solr/core/src/java/org/apache/solr/search/Insanity.java
index 67d1320..7da7a5c 100644
--- a/solr/core/src/java/org/apache/solr/search/Insanity.java
+++ b/solr/core/src/java/org/apache/solr/search/Insanity.java
@@ -67,7 +67,7 @@ public class Insanity {
         if (fi.name.equals(insaneField)) {
           filteredInfos.add(new FieldInfo(fi.name, fi.number, fi.hasVectors(), fi.omitsNorms(),
                                           fi.hasPayloads(), fi.getIndexOptions(), DocValuesType.NONE, -1, Collections.emptyMap(),
-                                          fi.getPointDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField()));
+                                          fi.getPointDataDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField()));
         } else {
           filteredInfos.add(fi);
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/uninverting/FieldCacheImpl.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/uninverting/FieldCacheImpl.java b/solr/core/src/java/org/apache/solr/uninverting/FieldCacheImpl.java
index e6e8fda..42c34ab 100644
--- a/solr/core/src/java/org/apache/solr/uninverting/FieldCacheImpl.java
+++ b/solr/core/src/java/org/apache/solr/uninverting/FieldCacheImpl.java
@@ -592,11 +592,11 @@ public class FieldCacheImpl implements FieldCache {
       if (parser instanceof PointParser) {
         // points case
         // no points in this segment
-        if (info.getPointDimensionCount() == 0) {
+        if (info.getPointDataDimensionCount() == 0) {
           return DocValues.emptyNumeric();
         }
-        if (info.getPointDimensionCount() != 1) {
-          throw new IllegalStateException("Type mismatch: " + field + " was indexed with dimensions=" + info.getPointDimensionCount());
+        if (info.getPointDataDimensionCount() != 1) {
+          throw new IllegalStateException("Type mismatch: " + field + " was indexed with dimensions=" + info.getPointDataDimensionCount());
         }
         PointValues values = reader.getPointValues(field);
         // no actual points for this field (e.g. all points deleted)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
index 176795d..e804635 100644
--- a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
+++ b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
@@ -237,12 +237,12 @@ public class UninvertingReader extends FilterLeafReader {
       DocValuesType type = fi.getDocValuesType();
       // fields which currently don't have docValues, but are uninvertable (indexed or points data present)
       if (type == DocValuesType.NONE &&
-          (fi.getIndexOptions() != IndexOptions.NONE || (fi.getPointNumBytes() > 0 && fi.getPointDimensionCount() == 1))) {
+          (fi.getIndexOptions() != IndexOptions.NONE || (fi.getPointNumBytes() > 0 && fi.getPointDataDimensionCount() == 1))) {
         Type t = mapping.apply(fi.name); // could definitely return null, thus still can't uninvert it
         if (t != null) {
           if (t == Type.INTEGER_POINT || t == Type.LONG_POINT || t == Type.FLOAT_POINT || t == Type.DOUBLE_POINT) {
             // type uses points
-            if (fi.getPointDimensionCount() == 0) {
+            if (fi.getPointDataDimensionCount() == 0) {
               continue;
             }
           } else {
@@ -284,7 +284,7 @@ public class UninvertingReader extends FilterLeafReader {
         wrap = true;
         newFieldInfos.add(new FieldInfo(fi.name, fi.number, fi.hasVectors(), fi.omitsNorms(),
             fi.hasPayloads(), fi.getIndexOptions(), type, fi.getDocValuesGen(), fi.attributes(),
-            fi.getPointDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField()));
+            fi.getPointDataDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField()));
       } else {
         newFieldInfos.add(fi);
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java b/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
index 9ec1234..05a1254 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
@@ -379,12 +379,14 @@ public class TestUninvertingReader extends LuceneTestCase {
 
     FieldInfo intFInfo = fieldInfos.fieldInfo("int");
     assertEquals(DocValuesType.NUMERIC, intFInfo.getDocValuesType());
-    assertEquals(0, intFInfo.getPointDimensionCount());
+    assertEquals(0, intFInfo.getPointDataDimensionCount());
+    assertEquals(0, intFInfo.getPointIndexDimensionCount());
     assertEquals(0, intFInfo.getPointNumBytes());
 
     FieldInfo dintFInfo = fieldInfos.fieldInfo("dint");
     assertEquals(DocValuesType.NUMERIC, dintFInfo.getDocValuesType());
-    assertEquals(1, dintFInfo.getPointDimensionCount());
+    assertEquals(1, dintFInfo.getPointDataDimensionCount());
+    assertEquals(1, dintFInfo.getPointIndexDimensionCount());
     assertEquals(4, dintFInfo.getPointNumBytes());
 
     FieldInfo dvFInfo = fieldInfos.fieldInfo("dv");
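
The hunks above consistently replace the old single getPointDimensionCount() accessor with separate data and index dimension counts. As a minimal sketch of reading both off a FieldInfo: the helper class and method name below are illustrative only and not part of the patch, but the getters are exactly the ones exercised in the assertions above.

import org.apache.lucene.index.FieldInfo;

class PointFieldDescriber {
  // Summarize a field's point configuration using the split accessors.
  static String describe(FieldInfo fi) {
    if (fi.getPointDataDimensionCount() == 0) {
      return fi.name + ": no points indexed";            // field has no point values
    }
    return String.format("%s: dataDims=%d indexDims=%d bytesPerDim=%d",
        fi.name,
        fi.getPointDataDimensionCount(),                  // dimensions encoded per value
        fi.getPointIndexDimensionCount(),                 // dimensions used to organize the BKD index
        fi.getPointNumBytes());
  }
}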


[08/50] [abbrv] lucene-solr:jira/http2: SOLR-12843: Implement a MultiContentWriter in SolrJ to post multiple files/payload at once

Posted by da...@apache.org.
SOLR-12843: Implement a MultiContentWriter in SolrJ to post multiple files/payload at once


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b4d9b25f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b4d9b25f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b4d9b25f

Branch: refs/heads/jira/http2
Commit: b4d9b25f4430d1c6491986d6f1805210bf1cfd39
Parents: dbed8ba
Author: Noble Paul <no...@apache.org>
Authored: Tue Oct 9 17:44:40 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Oct 9 17:44:40 2018 +1100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   2 +
 .../solr/handler/UpdateRequestHandler.java      |   2 +-
 .../solr/handler/loader/JavabinLoader.java      |  58 +++++++++
 .../request/MultiContentWriterRequest.java      | 123 +++++++++++++++++++
 .../solr/client/solrj/SolrExampleTests.java     |  41 ++++++-
 5 files changed, 224 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
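
As a usage sketch distilled from the SolrExampleTests change below: two payloads (one XML, one CSV) are posted in a single request. The SolrClient and the raw byte[] contents are assumed to be supplied by the caller; XML and JSON content types are auto-detected, so only the CSV entry needs ASSUME_CONTENT_TYPE.

import java.util.ArrayList;
import java.util.List;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.MultiContentWriterRequest;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Pair;

import static org.apache.solr.common.params.UpdateParams.ASSUME_CONTENT_TYPE;

class MultiContentWriterSketch {
  // Post an XML payload and a CSV payload with one HTTP request, then commit.
  static void postTwoPayloads(SolrClient client, byte[] xmlDocs, byte[] csvDocs) throws Exception {
    List<Pair<NamedList, Object>> payloads = new ArrayList<>();

    payloads.add(new Pair<>(new NamedList(), xmlDocs));        // '<' is auto-detected as text/xml

    NamedList csvParams = new NamedList();
    csvParams.add(ASSUME_CONTENT_TYPE, "application/csv");     // CSV cannot be auto-detected
    payloads.add(new Pair<>(csvParams, csvDocs));

    MultiContentWriterRequest req =
        new MultiContentWriterRequest(SolrRequest.METHOD.POST, "/update", payloads.iterator());
    req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
    client.request(req);
  }
}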


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b4d9b25f/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9bf6080..49e425d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -138,6 +138,8 @@ New Features
 
 * SOLR-12815: Implement maxOps limit for IndexSizeTrigger. (ab)
 
+* SOLR-12843: Implement a MultiContentWriter in SolrJ to post multiple files/payload at once (noble)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b4d9b25f/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
index 3c7ffda..cbe2cba 100644
--- a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
@@ -142,7 +142,7 @@ public class UpdateRequestHandler extends ContentStreamHandlerBase implements Pe
     registry.put("application/xml", new XMLLoader().init(p) );
     registry.put("application/json", new JsonLoader().init(p) );
     registry.put("application/csv", new CSVLoader().init(p) );
-    registry.put("application/javabin", new JavabinLoader().init(p) );
+    registry.put("application/javabin", new JavabinLoader(instance).init(p) );
     registry.put("text/csv", registry.get("application/csv") );
     registry.put("text/xml", registry.get("application/xml") );
     registry.put("text/json", registry.get("application/json"));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b4d9b25f/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java
index f502a8e..01f5f60 100644
--- a/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java
+++ b/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java
@@ -19,6 +19,8 @@ package org.apache.solr.handler.loader;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
@@ -31,7 +33,11 @@ import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.params.UpdateParams;
 import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.ContentStreamBase;
+import org.apache.solr.common.util.DataInputInputStream;
 import org.apache.solr.common.util.FastInputStream;
+import org.apache.solr.common.util.JavaBinCodec;
+import org.apache.solr.common.util.NamedList;
 import org.apache.solr.handler.RequestHandlerUtils;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
@@ -46,6 +52,16 @@ import org.apache.solr.update.processor.UpdateRequestProcessor;
  * @see org.apache.solr.common.util.JavaBinCodec
  */
 public class JavabinLoader extends ContentStreamLoader {
+  final ContentStreamLoader contentStreamLoader;
+
+  public JavabinLoader() {
+    this.contentStreamLoader = this;
+  }
+
+  public JavabinLoader(ContentStreamLoader contentStreamLoader) {
+    super();
+    this.contentStreamLoader = contentStreamLoader;
+  }
 
   @Override
   public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream, UpdateRequestProcessor processor) throws Exception {
@@ -62,6 +78,10 @@ public class JavabinLoader extends ContentStreamLoader {
   
   private void parseAndLoadDocs(final SolrQueryRequest req, SolrQueryResponse rsp, InputStream stream,
                                 final UpdateRequestProcessor processor) throws IOException {
+    if (req.getParams().getBool("multistream", false)) {
+      handleMultiStream(req, rsp, stream, processor);
+      return;
+    }
     UpdateRequest update = null;
     JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = new JavaBinUpdateRequestCodec.StreamingUpdateHandler() {
       private AddUpdateCommand addCmd = null;
@@ -116,6 +136,44 @@ public class JavabinLoader extends ContentStreamLoader {
     }
   }
 
+  private void handleMultiStream(SolrQueryRequest req, SolrQueryResponse rsp, InputStream stream, UpdateRequestProcessor processor)
+      throws IOException {
+    FastInputStream in = FastInputStream.wrap(stream);
+    SolrParams old = req.getParams();
+    new JavaBinCodec() {
+      SolrParams params;
+      AddUpdateCommand addCmd = null;
+
+      @Override
+      public List<Object> readIterator(DataInputInputStream fis) throws IOException {
+        while (true) {
+          Object o = readVal(fis);
+          if (o == END_OBJ) break;
+          if (o instanceof NamedList) {
+            params = ((NamedList) o).toSolrParams();
+          } else {
+            try {
+              if (o instanceof byte[]) {
+                if (params != null) req.setParams(params);
+                byte[] buf = (byte[]) o;
+                contentStreamLoader.load(req, rsp, new ContentStreamBase.ByteArrayStream(buf, null), processor);
+              } else {
+                throw new RuntimeException("unsupported type ");
+              }
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            } finally {
+              params = null;
+              req.setParams(old);
+            }
+          }
+        }
+        return Collections.emptyList();
+      }
+
+    }.unmarshal(in);
+  }
+
   private AddUpdateCommand getAddCommand(SolrQueryRequest req, SolrParams params) {
     AddUpdateCommand addCmd = new AddUpdateCommand(req);
     addCmd.overwrite = params.getBool(UpdateParams.OVERWRITE, true);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b4d9b25f/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java
new file mode 100644
index 0000000..1a206b8
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.request;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.Reader;
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+
+import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
+import org.apache.solr.common.IteratorWriter;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.JavaBinCodec;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Pair;
+
+import static org.apache.solr.common.params.UpdateParams.ASSUME_CONTENT_TYPE;
+
+public class MultiContentWriterRequest extends AbstractUpdateRequest {
+
+  private final Iterator<Pair<NamedList, Object>> payload;
+
+  /**
+   *
+   * @param m HTTP method
+   * @param path path to which to post to
+   * @param payload add the per doc params, The Object could be a ByteBuffer or byte[]
+   */
+
+  public MultiContentWriterRequest(METHOD m, String path, Iterator<Pair<NamedList, Object>> payload) {
+    super(m, path);
+    params = new ModifiableSolrParams();
+    params.add("multistream", "true");
+    this.payload = payload;
+  }
+
+
+  @Override
+  public RequestWriter.ContentWriter getContentWriter(String expectedType) {
+    return new RequestWriter.ContentWriter() {
+      @Override
+      public void write(OutputStream os) throws IOException {
+        new JavaBinCodec().marshal((IteratorWriter) iw -> {
+          while (payload.hasNext()) {
+            Pair<NamedList, Object> next = payload.next();
+
+            if (next.second() instanceof ByteBuffer || next.second() instanceof byte[]) {
+              NamedList params = next.first();
+              if(params.get(ASSUME_CONTENT_TYPE) == null){
+                String detectedType = detect(next.second());
+                if(detectedType==null){
+                  throw new RuntimeException("Unknown content type");
+                }
+                params.add(ASSUME_CONTENT_TYPE, detectedType);
+              }
+              iw.add(params);
+              iw.add(next.second());
+            }  else {
+              throw new RuntimeException("payload value must be byte[] or ByteBuffer");
+            }
+          }
+        }, os);
+      }
+
+      @Override
+      public String getContentType() {
+        return "application/javabin";
+      }
+    };
+  }
+  public static String detect(Object o) throws IOException {
+    Reader rdr = null;
+    byte[] bytes = null;
+    if (o instanceof byte[]) bytes = (byte[]) o;
+    else if (o instanceof ByteBuffer) bytes = ((ByteBuffer) o).array();
+    rdr = new InputStreamReader(new ByteArrayInputStream(bytes));
+    String detectedContentType = null;
+    for (;;) {
+      int ch = rdr.read();
+      if (Character.isWhitespace(ch)) {
+        continue;
+      }
+      int nextChar = -1;
+      // first non-whitespace chars
+      if (ch == '#'                         // single line comment
+          || (ch == '/' && ((nextChar = rdr.read()) == '/' || nextChar == '*'))  // single line or multi-line comment
+          || (ch == '{' || ch == '[')       // start of JSON object
+          )
+      {
+        detectedContentType = "application/json";
+      } else if (ch == '<') {
+        detectedContentType = "text/xml";
+      }
+      break;
+    }
+    return detectedContentType;
+  }
+
+  public static ByteBuffer readByteBuffer(InputStream is) throws IOException {
+    BinaryRequestWriter.BAOS baos = new BinaryRequestWriter.BAOS();
+    org.apache.commons.io.IOUtils.copy(is, baos);
+    return ByteBuffer.wrap(baos.getbuf(), 0, baos.size());
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b4d9b25f/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
index 1dabe5d..cb375d6 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
@@ -17,8 +17,11 @@
 package org.apache.solr.client.solrj;
 
 
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.lang.invoke.MethodHandles;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -43,6 +46,7 @@ import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest.ACTION;
 import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
 import org.apache.solr.client.solrj.request.LukeRequest;
+import org.apache.solr.client.solrj.request.MultiContentWriterRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.StreamingUpdateRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -64,11 +68,14 @@ import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.FacetParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Pair;
+import org.apache.solr.common.util.Utils;
 import org.junit.Test;
 import org.noggit.JSONParser;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.common.params.UpdateParams.ASSUME_CONTENT_TYPE;
 import static org.junit.internal.matchers.StringContains.containsString;
 
 /**
@@ -671,7 +678,39 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase
     Assert.assertEquals( 10, rsp.getResults().getNumFound() );
   }
 
- @Test
+  @Test
+  public void testMultiContentWriterRequest() throws Exception {
+    SolrClient client = getSolrClient();
+    client.deleteByQuery("*:*");// delete everything!
+    client.commit();
+    QueryResponse rsp = client.query(new SolrQuery("*:*"));
+    Assert.assertEquals(0, rsp.getResults().getNumFound());
+
+    List<Pair<NamedList, Object>> docs = new ArrayList<>();
+    NamedList params = new NamedList();
+    docs.add(new Pair(params, getFileContent(params, "solrj/docs1.xml")));
+
+    params = new NamedList();
+    params.add(ASSUME_CONTENT_TYPE, "application/csv");
+    docs.add(new Pair(params, getFileContent(params, "solrj/books.csv")));
+
+    MultiContentWriterRequest up = new MultiContentWriterRequest(SolrRequest.METHOD.POST, "/update", docs.iterator());
+    up.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
+    NamedList<Object> result = client.request(up);
+    System.out.println(result.jsonStr());
+    rsp = client.query(new SolrQuery("*:*"));
+    Assert.assertEquals(12, rsp.getResults().getNumFound());
+
+  }
+
+  private ByteBuffer getFileContent(NamedList nl, String name) throws IOException {
+    try (InputStream is = new FileInputStream(getFile(name))) {
+      return MultiContentWriterRequest.readByteBuffer(is);
+    }
+  }
+
+
+  @Test
  public void testMultiContentStreamRequest() throws Exception {
     SolrClient client = getSolrClient();
     client.deleteByQuery("*:*");// delete everything!


[45/50] [abbrv] lucene-solr:jira/http2: SOLR-12740: adding a link

Posted by da...@apache.org.
SOLR-12740: adding a link


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/761f8aaf
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/761f8aaf
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/761f8aaf

Branch: refs/heads/jira/http2
Commit: 761f8aaf796d3447722dc95918b3ed0eb828c0f7
Parents: f1a30bf
Author: Noble Paul <no...@apache.org>
Authored: Wed Oct 17 00:21:07 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Wed Oct 17 00:22:54 2018 +1100

----------------------------------------------------------------------
 solr/solr-ref-guide/src/migrate-to-policy-rule.adoc | 6 +++---
 solr/solr-ref-guide/src/solrcloud-autoscaling.adoc  | 3 ++-
 2 files changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/761f8aaf/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc b/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc
index c91e272..6ad3c59 100644
--- a/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc
+++ b/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc
@@ -37,7 +37,7 @@ The equivalent new syntax is
 The new policy rules have to be created separately using an API call  <<solrcloud-autoscaling-api.adoc#Create and Modify Cluster Policies, See examples>>
 
 
-=== Rule Operators
+== Rule Operators
 
 All the following operators can be directly used in the new policy syntax and they mean the same.
 
@@ -70,7 +70,7 @@ Tag values are provided by the framework and these tags mean the same the new sy
 * *ip_1, ip_2, ip_3, ip_4*: These are ip fragments for each node. For example, in a host with ip `192.168.1.2`, `ip_1 = 2`, `ip_2 =1`, `ip_3 = 168` and` ip_4 = 192`
 * *sysprop.\{PROPERTY_NAME}*: These are values available from system properties. `sysprop.key` means a value that is passed to the node as `-Dkey=keyValue` during the node startup. It is possible to use rules like `sysprop.key:expectedVal,shard:*`
 
-=== Snitches
+== Snitches
 There is no equivalent for a snitch in the new policy framework
 
 == Porting existing Replica placement rules
@@ -167,4 +167,4 @@ host:!192.45.67.3
 
 == Defining Rules
 
-Rules are always defined in along with the collection in the legacy system. The new syntax allows you to specify rules globally as well as on a <<solrcloud-autoscaling-policy-preferences.adoc#Defining Collection-Specific Policies,per collection basis>>
+Rules are always defined in along with the collection in the legacy system. The new syntax allows you to specify rules globally as well as on a <<solrcloud-autoscaling-policy-preferences.adoc#collection-specific-policy, per collection basis>>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/761f8aaf/solr/solr-ref-guide/src/solrcloud-autoscaling.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solrcloud-autoscaling.adoc b/solr/solr-ref-guide/src/solrcloud-autoscaling.adoc
index da6d31b..61b8b6e 100644
--- a/solr/solr-ref-guide/src/solrcloud-autoscaling.adoc
+++ b/solr/solr-ref-guide/src/solrcloud-autoscaling.adoc
@@ -1,5 +1,5 @@
 = SolrCloud Autoscaling
-:page-children: solrcloud-autoscaling-overview, solrcloud-autoscaling-policy-preferences, solrcloud-autoscaling-triggers, solrcloud-autoscaling-trigger-actions, solrcloud-autoscaling-listeners, solrcloud-autoscaling-auto-add-replicas, solrcloud-autoscaling-fault-tolerance, solrcloud-autoscaling-api
+:page-children: solrcloud-autoscaling-overview, solrcloud-autoscaling-policy-preferences, solrcloud-autoscaling-triggers, solrcloud-autoscaling-trigger-actions, solrcloud-autoscaling-listeners, solrcloud-autoscaling-auto-add-replicas, solrcloud-autoscaling-fault-tolerance, solrcloud-autoscaling-api, migrate-to-policy-rule
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -32,3 +32,4 @@ The following sections describe the autoscaling features of SolrCloud:
 * <<solrcloud-autoscaling-auto-add-replicas.adoc#solrcloud-autoscaling-auto-add-replicas,Automatically Adding Replicas>>
 * <<solrcloud-autoscaling-fault-tolerance.adoc#solrcloud-autoscaling-fault-tolerance,Autoscaling Fault Tolerance>>
 * <<solrcloud-autoscaling-api.adoc#solrcloud-autoscaling-api,Autoscaling API>>
+* <<migrate-to-policy-rule.adoc#migrate-to-policy-rule, Migrating from legacy rules>>


[25/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Use legacy assignment in AutoAddReplicasPlanActionTest

Posted by da...@apache.org.
SOLR-12739: Use legacy assignment in AutoAddReplicasPlanActionTest


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/971a0e3f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/971a0e3f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/971a0e3f

Branch: refs/heads/jira/http2
Commit: 971a0e3f4afddab4687642834037c52fef0c6758
Parents: 80011d6
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Thu Oct 11 08:58:47 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Thu Oct 11 08:58:47 2018 +0530

----------------------------------------------------------------------
 .../solr/cloud/autoscaling/AutoAddReplicasPlanActionTest.java | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/971a0e3f/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanActionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanActionTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanActionTest.java
index 1e25014..31bd2fd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanActionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanActionTest.java
@@ -28,6 +28,7 @@ import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.V2Request;
 import org.apache.solr.cloud.CloudDescriptor;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.cloud.ClusterStateUtil;
@@ -51,6 +52,12 @@ public class AutoAddReplicasPlanActionTest extends SolrCloudTestCase{
     configureCluster(3)
         .addConfig("conf", configset("cloud-minimal"))
         .configure();
+
+    new V2Request.Builder("/cluster")
+        .withMethod(SolrRequest.METHOD.POST)
+        .withPayload("{set-obj-property:{defaults : {cluster: {useLegacyReplicaAssignment:true}}}}}")
+        .build()
+        .process(cluster.getSolrClient());
   }
 
   @Test


[02/50] [abbrv] lucene-solr:jira/http2: SOLR-12749: Update CHANGES.txt

Posted by da...@apache.org.
SOLR-12749: Update CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/df07a43b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/df07a43b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/df07a43b

Branch: refs/heads/jira/http2
Commit: df07a43b56ee42f01d2af6d6357f2fd9a7d03aa2
Parents: 6a702ee
Author: Joel Bernstein <jb...@apache.org>
Authored: Mon Oct 8 13:05:26 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Mon Oct 8 13:05:26 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/df07a43b/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index ac93a63..a760f5e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -436,6 +436,8 @@ Bug Fixes
 
 * SOLR-12765: Incorrect format of JMX cache stats. (Bojan Smid, ab)
 
+* SOLR-12749: timeseries() expression missing sum() results for empty buckets (Joel Bernstein)
+
 Optimizations
 ----------------------
 


[16/50] [abbrv] lucene-solr:jira/http2: SOLR-12843: fix precommit

Posted by da...@apache.org.
SOLR-12843: fix precommit


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8d205ecd
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8d205ecd
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8d205ecd

Branch: refs/heads/jira/http2
Commit: 8d205ecd1c6a133f7cb9a4352388ec30d00b4bdb
Parents: 41e3d07
Author: Steve Rowe <sa...@apache.org>
Authored: Tue Oct 9 17:42:41 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Tue Oct 9 17:42:41 2018 -0400

----------------------------------------------------------------------
 .../src/test/org/apache/solr/client/solrj/SolrExampleTests.java     | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d205ecd/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
index cb375d6..b83be83 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
@@ -69,7 +69,6 @@ import org.apache.solr.common.params.FacetParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.Utils;
 import org.junit.Test;
 import org.noggit.JSONParser;
 import org.slf4j.Logger;


[35/50] [abbrv] lucene-solr:jira/http2: Add entry in CHANGES.txt

Posted by da...@apache.org.
Add entry in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a312c0db
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a312c0db
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a312c0db

Branch: refs/heads/jira/http2
Commit: a312c0db055b9ac7926c043b3737545cac0d4f55
Parents: a5665d8
Author: iverase <iv...@apache.org>
Authored: Mon Oct 15 09:00:09 2018 +0200
Committer: iverase <iv...@apache.org>
Committed: Mon Oct 15 09:00:09 2018 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a312c0db/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 941e603..7a181bd 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -192,6 +192,9 @@ Bug fixes:
 * LUCENE-8479: QueryBuilder#analyzeGraphPhrase now throws TooManyClause exception
   if the number of expanded path reaches the BooleanQuery#maxClause limit. (Jim Ferenczi)
 
+* LUCENE-8522: throw InvalidShapeException when constructing a polygon and
+  all points are coplanar. (Ignacio Vera)
+
 New Features
 
 * LUCENE-8496: Selective indexing - modify BKDReader/BKDWriter to allow users


[05/50] [abbrv] lucene-solr:jira/http2: LUCENE-8496: Selective indexing - modify BKDReader/BKDWriter to allow users to select a fewer number of dimensions to be used for creating the index than the total number of dimensions used for field encoding. i.e.

Posted by da...@apache.org.
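
In short, a field can now encode more dimensions per value than are used to organize the BKD index. A minimal sketch of what that looks like at indexing time; the three-argument setDimensions overload and the byte[] Field constructor are assumed here rather than quoted from the patch, so treat this as illustrative only.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;

class SelectiveIndexingSketch {
  // Four data dimensions are stored per point value, but only the first two
  // drive index construction (cell splits and min/max packed values).
  static Document exampleDoc() {
    FieldType type = new FieldType();
    type.setDimensions(4, 2, Integer.BYTES);     // dataDims, indexDims, bytesPerDim (assumed overload)
    type.freeze();

    byte[] packed = new byte[4 * Integer.BYTES]; // caller packs all four dims, four bytes each
    Document doc = new Document();
    doc.add(new Field("ranges", packed, type));
    return doc;
  }
}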
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/RangeFieldQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/RangeFieldQuery.java b/lucene/core/src/java/org/apache/lucene/document/RangeFieldQuery.java
index 773878a..f623701 100644
--- a/lucene/core/src/java/org/apache/lucene/document/RangeFieldQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/document/RangeFieldQuery.java
@@ -255,9 +255,9 @@ abstract class RangeFieldQuery extends Query {
 
   /** Check indexed field info against the provided query data. */
   private void checkFieldInfo(FieldInfo fieldInfo) {
-    if (fieldInfo.getPointDimensionCount()/2 != numDims) {
+    if (fieldInfo.getPointDataDimensionCount()/2 != numDims) {
       throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numDims="
-          + fieldInfo.getPointDimensionCount()/2 + " but this query has numDims=" + numDims);
+          + fieldInfo.getPointDataDimensionCount()/2 + " but this query has numDims=" + numDims);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index aa01723..aa799f2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -1900,7 +1900,7 @@ public final class CheckIndex implements Closeable {
           throw new RuntimeException("there are fields with points, but reader.getPointsReader() is null");
         }
         for (FieldInfo fieldInfo : fieldInfos) {
-          if (fieldInfo.getPointDimensionCount() > 0) {
+          if (fieldInfo.getPointDataDimensionCount() > 0) {
             PointValues values = pointsReader.getValues(fieldInfo.name);
             if (values == null) {
               continue;
@@ -1970,7 +1970,9 @@ public final class CheckIndex implements Closeable {
     private final byte[] globalMinPackedValue;
     private final byte[] globalMaxPackedValue;
     private final int packedBytesCount;
-    private final int numDims;
+    private final int packedIndexBytesCount;
+    private final int numDataDims;
+    private final int numIndexDims;
     private final int bytesPerDim;
     private final String fieldName;
 
@@ -1978,14 +1980,16 @@ public final class CheckIndex implements Closeable {
     public VerifyPointsVisitor(String fieldName, int maxDoc, PointValues values) throws IOException {
       this.maxDoc = maxDoc;
       this.fieldName = fieldName;
-      numDims = values.getNumDimensions();
+      numDataDims = values.getNumDataDimensions();
+      numIndexDims = values.getNumIndexDimensions();
       bytesPerDim = values.getBytesPerDimension();
-      packedBytesCount = numDims * bytesPerDim;
+      packedBytesCount = numDataDims * bytesPerDim;
+      packedIndexBytesCount = numIndexDims * bytesPerDim;
       globalMinPackedValue = values.getMinPackedValue();
       globalMaxPackedValue = values.getMaxPackedValue();
       docsSeen = new FixedBitSet(maxDoc);
-      lastMinPackedValue = new byte[packedBytesCount];
-      lastMaxPackedValue = new byte[packedBytesCount];
+      lastMinPackedValue = new byte[packedIndexBytesCount];
+      lastMaxPackedValue = new byte[packedIndexBytesCount];
       lastPackedValue = new byte[packedBytesCount];
 
       if (values.getDocCount() > values.size()) {
@@ -2000,14 +2004,14 @@ public final class CheckIndex implements Closeable {
         if (values.size() != 0) {
           throw new RuntimeException("getMinPackedValue is null points for field \"" + fieldName + "\" yet size=" + values.size());
         }
-      } else if (globalMinPackedValue.length != packedBytesCount) {
+      } else if (globalMinPackedValue.length != packedIndexBytesCount) {
         throw new RuntimeException("getMinPackedValue for field \"" + fieldName + "\" return length=" + globalMinPackedValue.length + " array, but should be " + packedBytesCount);
       }
       if (globalMaxPackedValue == null) {
         if (values.size() != 0) {
           throw new RuntimeException("getMaxPackedValue is null points for field \"" + fieldName + "\" yet size=" + values.size());
         }
-      } else if (globalMaxPackedValue.length != packedBytesCount) {
+      } else if (globalMaxPackedValue.length != packedIndexBytesCount) {
         throw new RuntimeException("getMaxPackedValue for field \"" + fieldName + "\" return length=" + globalMaxPackedValue.length + " array, but should be " + packedBytesCount);
       }
     }
@@ -2033,7 +2037,7 @@ public final class CheckIndex implements Closeable {
       pointCountSeen++;
       docsSeen.set(docID);
 
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         int offset = bytesPerDim * dim;
 
         // Compare to last cell:
@@ -2048,9 +2052,10 @@ public final class CheckIndex implements Closeable {
         }
       }
 
-      // In the 1D case, PointValues must make a single in-order sweep through all values, and tie-break by
+      // In the 1D data case, PointValues must make a single in-order sweep through all values, and tie-break by
       // increasing docID:
-      if (numDims == 1) {
+      // for data dimension > 1, leaves are sorted by the dimension with the lowest cardinality to improve block compression
+      if (numDataDims == 1) {
         int cmp = FutureArrays.compareUnsigned(lastPackedValue, 0, bytesPerDim, packedValue, 0, bytesPerDim);
         if (cmp > 0) {
           throw new RuntimeException("packed points value " + Arrays.toString(packedValue) + " for field=\"" + fieldName + "\", for docID=" + docID + " is out-of-order vs the previous document's value " + Arrays.toString(lastPackedValue));
@@ -2067,11 +2072,11 @@ public final class CheckIndex implements Closeable {
     @Override
     public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
       checkPackedValue("min packed value", minPackedValue, -1);
-      System.arraycopy(minPackedValue, 0, lastMinPackedValue, 0, packedBytesCount);
+      System.arraycopy(minPackedValue, 0, lastMinPackedValue, 0, packedIndexBytesCount);
       checkPackedValue("max packed value", maxPackedValue, -1);
-      System.arraycopy(maxPackedValue, 0, lastMaxPackedValue, 0, packedBytesCount);
+      System.arraycopy(maxPackedValue, 0, lastMaxPackedValue, 0, packedIndexBytesCount);
 
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         int offset = bytesPerDim * dim;
 
         if (FutureArrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, maxPackedValue, offset, offset + bytesPerDim) > 0) {
@@ -2110,8 +2115,8 @@ public final class CheckIndex implements Closeable {
         throw new RuntimeException(desc + " is null for docID=" + docID + " field=\"" + fieldName + "\"");
       }
 
-      if (packedValue.length != packedBytesCount) {
-        throw new RuntimeException(desc + " has incorrect length=" + packedValue.length + " vs expected=" + packedBytesCount + " for docID=" + docID + " field=\"" + fieldName + "\"");
+      if (packedValue.length != (docID < 0 ? packedIndexBytesCount : packedBytesCount)) {
+        throw new RuntimeException(desc + " has incorrect length=" + packedValue.length + " vs expected=" + packedIndexBytesCount + " for docID=" + docID + " field=\"" + fieldName + "\"");
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
index 50aaa42..ca6ac7d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
@@ -194,7 +194,7 @@ public abstract class CodecReader extends LeafReader implements Accountable {
   public final PointValues getPointValues(String field) throws IOException {
     ensureOpen();
     FieldInfo fi = getFieldInfos().fieldInfo(field);
-    if (fi == null || fi.getPointDimensionCount() == 0) {
+    if (fi == null || fi.getPointDataDimensionCount() == 0) {
       // Field does not exist or does not index points
       return null;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
index d0a6974..4cc981d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
@@ -201,7 +201,7 @@ final class DefaultIndexingChain extends DocConsumer {
         PerField perField = fieldHash[i];
         while (perField != null) {
           if (perField.pointValuesWriter != null) {
-            if (perField.fieldInfo.getPointDimensionCount() == 0) {
+            if (perField.fieldInfo.getPointDataDimensionCount() == 0) {
               // BUG
               throw new AssertionError("segment=" + state.segmentInfo + ": field=\"" + perField.fieldInfo.name + "\" has no points but wrote them");
             }
@@ -216,7 +216,7 @@ final class DefaultIndexingChain extends DocConsumer {
 
             perField.pointValuesWriter.flush(state, sortMap, pointsWriter);
             perField.pointValuesWriter = null;
-          } else if (perField.fieldInfo.getPointDimensionCount() != 0) {
+          } else if (perField.fieldInfo.getPointDataDimensionCount() != 0) {
             // BUG
             throw new AssertionError("segment=" + state.segmentInfo + ": field=\"" + perField.fieldInfo.name + "\" has points but did not write them");
           }
@@ -477,7 +477,7 @@ final class DefaultIndexingChain extends DocConsumer {
       }
       indexDocValue(fp, dvType, field);
     }
-    if (fieldType.pointDimensionCount() != 0) {
+    if (fieldType.pointDataDimensionCount() != 0) {
       if (fp == null) {
         fp = getOrAddField(fieldName, fieldType, false);
       }
@@ -508,17 +508,18 @@ final class DefaultIndexingChain extends DocConsumer {
 
   /** Called from processDocument to index one field's point */
   private void indexPoint(PerField fp, IndexableField field) throws IOException {
-    int pointDimensionCount = field.fieldType().pointDimensionCount();
+    int pointDataDimensionCount = field.fieldType().pointDataDimensionCount();
+    int pointIndexDimensionCount = field.fieldType().pointIndexDimensionCount();
 
     int dimensionNumBytes = field.fieldType().pointNumBytes();
 
     // Record dimensions for this field; this setter will throw IllegalArgExc if
     // the dimensions were already set to something different:
-    if (fp.fieldInfo.getPointDimensionCount() == 0) {
-      fieldInfos.globalFieldNumbers.setDimensions(fp.fieldInfo.number, fp.fieldInfo.name, pointDimensionCount, dimensionNumBytes);
+    if (fp.fieldInfo.getPointDataDimensionCount() == 0) {
+      fieldInfos.globalFieldNumbers.setDimensions(fp.fieldInfo.number, fp.fieldInfo.name, pointDataDimensionCount, pointIndexDimensionCount, dimensionNumBytes);
     }
 
-    fp.fieldInfo.setPointDimensions(pointDimensionCount, dimensionNumBytes);
+    fp.fieldInfo.setPointDimensions(pointDataDimensionCount, pointIndexDimensionCount, dimensionNumBytes);
 
     if (fp.pointValuesWriter == null) {
       fp.pointValuesWriter = new PointValuesWriter(docWriter, fp.fieldInfo);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
index b50cb12..c5d85bc 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
@@ -50,7 +50,8 @@ public final class FieldInfo {
 
   /** If both of these are positive it means this field indexed points
    *  (see {@link org.apache.lucene.codecs.PointsFormat}). */
-  private int pointDimensionCount;
+  private int pointDataDimensionCount;
+  private int pointIndexDimensionCount;
   private int pointNumBytes;
 
   // whether this field is used as the soft-deletes field
@@ -63,7 +64,7 @@ public final class FieldInfo {
    */
   public FieldInfo(String name, int number, boolean storeTermVector, boolean omitNorms, boolean storePayloads,
                    IndexOptions indexOptions, DocValuesType docValues, long dvGen, Map<String,String> attributes,
-                   int pointDimensionCount, int pointNumBytes, boolean softDeletesField) {
+                   int pointDataDimensionCount, int pointIndexDimensionCount, int pointNumBytes, boolean softDeletesField) {
     this.name = Objects.requireNonNull(name);
     this.number = number;
     this.docValuesType = Objects.requireNonNull(docValues, "DocValuesType must not be null (field: \"" + name + "\")");
@@ -79,7 +80,8 @@ public final class FieldInfo {
     }
     this.dvGen = dvGen;
     this.attributes = Objects.requireNonNull(attributes);
-    this.pointDimensionCount = pointDimensionCount;
+    this.pointDataDimensionCount = pointDataDimensionCount;
+    this.pointIndexDimensionCount = pointIndexDimensionCount;
     this.pointNumBytes = pointNumBytes;
     this.softDeletesField = softDeletesField;
     assert checkConsistency();
@@ -107,20 +109,28 @@ public final class FieldInfo {
       }
     }
 
-    if (pointDimensionCount < 0) {
-      throw new IllegalStateException("pointDimensionCount must be >= 0; got " + pointDimensionCount);
+    if (pointDataDimensionCount < 0) {
+      throw new IllegalStateException("pointDataDimensionCount must be >= 0; got " + pointDataDimensionCount);
+    }
+
+    if (pointIndexDimensionCount < 0) {
+      throw new IllegalStateException("pointIndexDimensionCount must be >= 0; got " + pointIndexDimensionCount);
     }
 
     if (pointNumBytes < 0) {
       throw new IllegalStateException("pointNumBytes must be >= 0; got " + pointNumBytes);
     }
 
-    if (pointDimensionCount != 0 && pointNumBytes == 0) {
-      throw new IllegalStateException("pointNumBytes must be > 0 when pointDimensionCount=" + pointDimensionCount);
+    if (pointDataDimensionCount != 0 && pointNumBytes == 0) {
+      throw new IllegalStateException("pointNumBytes must be > 0 when pointDataDimensionCount=" + pointDataDimensionCount);
+    }
+
+    if (pointIndexDimensionCount != 0 && pointDataDimensionCount == 0) {
+      throw new IllegalStateException("pointIndexDimensionCount must be 0 when pointDataDimensionCount=0");
     }
 
-    if (pointNumBytes != 0 && pointDimensionCount == 0) {
-      throw new IllegalStateException("pointDimensionCount must be > 0 when pointNumBytes=" + pointNumBytes);
+    if (pointNumBytes != 0 && pointDataDimensionCount == 0) {
+      throw new IllegalStateException("pointDataDimensionCount must be > 0 when pointNumBytes=" + pointNumBytes);
     }
     
     if (dvGen != -1 && docValuesType == DocValuesType.NONE) {
@@ -132,7 +142,7 @@ public final class FieldInfo {
 
   // should only be called by FieldInfos#addOrUpdate
   void update(boolean storeTermVector, boolean omitNorms, boolean storePayloads, IndexOptions indexOptions,
-              int dimensionCount, int dimensionNumBytes) {
+              int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
     if (indexOptions == null) {
       throw new NullPointerException("IndexOptions must not be null (field: \"" + name + "\")");
     }
@@ -145,11 +155,12 @@ public final class FieldInfo {
       }
     }
 
-    if (this.pointDimensionCount == 0 && dimensionCount != 0) {
-      this.pointDimensionCount = dimensionCount;
+    if (this.pointDataDimensionCount == 0 && dataDimensionCount != 0) {
+      this.pointDataDimensionCount = dataDimensionCount;
+      this.pointIndexDimensionCount = indexDimensionCount;
       this.pointNumBytes = dimensionNumBytes;
-    } else if (dimensionCount != 0 && (this.pointDimensionCount != dimensionCount || this.pointNumBytes != dimensionNumBytes)) {
-      throw new IllegalArgumentException("cannot change field \"" + name + "\" from points dimensionCount=" + this.pointDimensionCount + ", numBytes=" + this.pointNumBytes + " to inconsistent dimensionCount=" + dimensionCount + ", numBytes=" + dimensionNumBytes);
+    } else if (dataDimensionCount != 0 && (this.pointDataDimensionCount != dataDimensionCount || this.pointIndexDimensionCount != indexDimensionCount || this.pointNumBytes != dimensionNumBytes)) {
+      throw new IllegalArgumentException("cannot change field \"" + name + "\" from points dataDimensionCount=" + this.pointDataDimensionCount + ", indexDimensionCount=" + this.pointIndexDimensionCount + ", numBytes=" + this.pointNumBytes + " to inconsistent dataDimensionCount=" + dataDimensionCount +", indexDimensionCount=" + indexDimensionCount + ", numBytes=" + dimensionNumBytes);
     }
 
     if (this.indexOptions != IndexOptions.NONE) { // if updated field data is not for indexing, leave the updates out
@@ -170,12 +181,15 @@ public final class FieldInfo {
 
   /** Record that this field is indexed with points, with the
    *  specified number of dimensions and bytes per dimension. */
-  public void setPointDimensions(int count, int numBytes) {
-    if (count <= 0) {
-      throw new IllegalArgumentException("point dimension count must be >= 0; got " + count + " for field=\"" + name + "\"");
+  public void setPointDimensions(int dataDimensionCount, int indexDimensionCount, int numBytes) {
+    if (dataDimensionCount <= 0) {
+      throw new IllegalArgumentException("point data dimension count must be > 0; got " + dataDimensionCount + " for field=\"" + name + "\"");
     }
-    if (count > PointValues.MAX_DIMENSIONS) {
-      throw new IllegalArgumentException("point dimension count must be < PointValues.MAX_DIMENSIONS (= " + PointValues.MAX_DIMENSIONS + "); got " + count + " for field=\"" + name + "\"");
+    if (dataDimensionCount > PointValues.MAX_DIMENSIONS) {
+      throw new IllegalArgumentException("point data dimension count must be <= PointValues.MAX_DIMENSIONS (= " + PointValues.MAX_DIMENSIONS + "); got " + dataDimensionCount + " for field=\"" + name + "\"");
+    }
+    if (indexDimensionCount > dataDimensionCount) {
+      throw new IllegalArgumentException("point index dimension count must be <= point data dimension count (= " + dataDimensionCount + "); got " + indexDimensionCount + " for field=\"" + name + "\"");
     }
     if (numBytes <= 0) {
       throw new IllegalArgumentException("point numBytes must be >= 0; got " + numBytes + " for field=\"" + name + "\"");
@@ -183,22 +197,31 @@ public final class FieldInfo {
     if (numBytes > PointValues.MAX_NUM_BYTES) {
       throw new IllegalArgumentException("point numBytes must be <= PointValues.MAX_NUM_BYTES (= " + PointValues.MAX_NUM_BYTES + "); got " + numBytes + " for field=\"" + name + "\"");
     }
-    if (pointDimensionCount != 0 && pointDimensionCount != count) {
-      throw new IllegalArgumentException("cannot change point dimension count from " + pointDimensionCount + " to " + count + " for field=\"" + name + "\"");
+    if (pointDataDimensionCount != 0 && pointDataDimensionCount != dataDimensionCount) {
+      throw new IllegalArgumentException("cannot change point data dimension count from " + pointDataDimensionCount + " to " + dataDimensionCount + " for field=\"" + name + "\"");
+    }
+    if (pointIndexDimensionCount != 0 && pointIndexDimensionCount != indexDimensionCount) {
+      throw new IllegalArgumentException("cannot change point index dimension count from " + pointIndexDimensionCount + " to " + indexDimensionCount + " for field=\"" + name + "\"");
     }
     if (pointNumBytes != 0 && pointNumBytes != numBytes) {
       throw new IllegalArgumentException("cannot change point numBytes from " + pointNumBytes + " to " + numBytes + " for field=\"" + name + "\"");
     }
 
-    pointDimensionCount = count;
+    pointDataDimensionCount = dataDimensionCount;
+    pointIndexDimensionCount = indexDimensionCount;
     pointNumBytes = numBytes;
 
     assert checkConsistency();
   }
 
-  /** Return point dimension count */
-  public int getPointDimensionCount() {
-    return pointDimensionCount;
+  /** Return point data dimension count */
+  public int getPointDataDimensionCount() {
+    return pointDataDimensionCount;
+  }
+
+  /** Return point index dimension count */
+  public int getPointIndexDimensionCount() {
+    return pointIndexDimensionCount;
   }
 
   /** Return number of bytes per dimension */

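A note on the new invariants: taken together, the FieldInfo changes above require a positive data dimension count, an index dimension count no larger than the data dimension count, and a positive per-dimension byte width within the codec limits. The following stand-alone sketch is only an approximation of the checks spread across setPointDimensions and checkConsistency (PointDimensionInvariants and its constants are illustrative, assumed to mirror PointValues.MAX_DIMENSIONS and PointValues.MAX_NUM_BYTES):

final class PointDimensionInvariants {
  // Assumed to match PointValues.MAX_DIMENSIONS and PointValues.MAX_NUM_BYTES.
  private static final int MAX_DIMENSIONS = 8;
  private static final int MAX_NUM_BYTES = 16;

  /** Approximates the (dataDims, indexDims, numBytes) validation a point-indexed FieldInfo performs. */
  static void check(int dataDims, int indexDims, int numBytes) {
    if (dataDims <= 0 || dataDims > MAX_DIMENSIONS) {
      throw new IllegalArgumentException("dataDims must be in [1, " + MAX_DIMENSIONS + "]; got " + dataDims);
    }
    if (indexDims <= 0 || indexDims > dataDims) {
      throw new IllegalArgumentException("indexDims must be in [1, " + dataDims + "]; got " + indexDims);
    }
    if (numBytes <= 0 || numBytes > MAX_NUM_BYTES) {
      throw new IllegalArgumentException("numBytes must be in [1, " + MAX_NUM_BYTES + "]; got " + numBytes);
    }
  }
}
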
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
index 0a0ff5e..1e0ef94 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
@@ -93,7 +93,7 @@ public class FieldInfos implements Iterable<FieldInfo> {
       hasNorms |= info.hasNorms();
       hasDocValues |= info.getDocValuesType() != DocValuesType.NONE;
       hasPayloads |= info.hasPayloads();
-      hasPointValues |= (info.getPointDimensionCount() != 0);
+      hasPointValues |= (info.getPointDataDimensionCount() != 0);
       if (info.isSoftDeletesField()) {
         if (softDeletesField != null && softDeletesField.equals(info.name) == false) {
           throw new IllegalArgumentException("multiple soft-deletes fields [" + info.name + ", " + softDeletesField + "]");
@@ -210,11 +210,13 @@ public class FieldInfos implements Iterable<FieldInfo> {
   }
 
   static final class FieldDimensions {
-    public final int dimensionCount;
+    public final int dataDimensionCount;
+    public final int indexDimensionCount;
     public final int dimensionNumBytes;
 
-    public FieldDimensions(int dimensionCount, int dimensionNumBytes) {
-      this.dimensionCount = dimensionCount;
+    public FieldDimensions(int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
+      this.dataDimensionCount = dataDimensionCount;
+      this.indexDimensionCount = indexDimensionCount;
       this.dimensionNumBytes = dimensionNumBytes;
     }
   }
@@ -254,7 +256,7 @@ public class FieldInfos implements Iterable<FieldInfo> {
      * number assigned if possible otherwise the first unassigned field number
      * is used as the field number.
      */
-    synchronized int addOrGet(String fieldName, int preferredFieldNumber, IndexOptions indexOptions, DocValuesType dvType, int dimensionCount, int dimensionNumBytes, boolean isSoftDeletesField) {
+    synchronized int addOrGet(String fieldName, int preferredFieldNumber, IndexOptions indexOptions, DocValuesType dvType, int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes, boolean isSoftDeletesField) {
       if (indexOptions != IndexOptions.NONE) {
         IndexOptions currentOpts = this.indexOptions.get(fieldName);
         if (currentOpts == null) {
@@ -271,17 +273,20 @@ public class FieldInfos implements Iterable<FieldInfo> {
           throw new IllegalArgumentException("cannot change DocValues type from " + currentDVType + " to " + dvType + " for field \"" + fieldName + "\"");
         }
       }
-      if (dimensionCount != 0) {
+      if (dataDimensionCount != 0) {
         FieldDimensions dims = dimensions.get(fieldName);
         if (dims != null) {
-          if (dims.dimensionCount != dimensionCount) {
-            throw new IllegalArgumentException("cannot change point dimension count from " + dims.dimensionCount + " to " + dimensionCount + " for field=\"" + fieldName + "\"");
+          if (dims.dataDimensionCount != dataDimensionCount) {
+            throw new IllegalArgumentException("cannot change point data dimension count from " + dims.dataDimensionCount + " to " + dataDimensionCount + " for field=\"" + fieldName + "\"");
+          }
+          if (dims.indexDimensionCount != indexDimensionCount) {
+            throw new IllegalArgumentException("cannot change point index dimension count from " + dims.indexDimensionCount + " to " + indexDimensionCount + " for field=\"" + fieldName + "\"");
           }
           if (dims.dimensionNumBytes != dimensionNumBytes) {
             throw new IllegalArgumentException("cannot change point numBytes from " + dims.dimensionNumBytes + " to " + dimensionNumBytes + " for field=\"" + fieldName + "\"");
           }
         } else {
-          dimensions.put(fieldName, new FieldDimensions(dimensionCount, dimensionNumBytes));
+          dimensions.put(fieldName, new FieldDimensions(dataDimensionCount, indexDimensionCount, dimensionNumBytes));
         }
       }
       Integer fieldNumber = nameToNumber.get(fieldName);
@@ -341,7 +346,7 @@ public class FieldInfos implements Iterable<FieldInfo> {
       }
     }
 
-    synchronized void verifyConsistentDimensions(Integer number, String name, int dimensionCount, int dimensionNumBytes) {
+    synchronized void verifyConsistentDimensions(Integer number, String name, int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
       if (name.equals(numberToName.get(number)) == false) {
         throw new IllegalArgumentException("field number " + number + " is already mapped to field name \"" + numberToName.get(number) + "\", not \"" + name + "\"");
       }
@@ -350,8 +355,11 @@ public class FieldInfos implements Iterable<FieldInfo> {
       }
       FieldDimensions dim = dimensions.get(name);
       if (dim != null) {
-        if (dim.dimensionCount != dimensionCount) {
-          throw new IllegalArgumentException("cannot change point dimension count from " + dim.dimensionCount + " to " + dimensionCount + " for field=\"" + name + "\"");
+        if (dim.dataDimensionCount != dataDimensionCount) {
+          throw new IllegalArgumentException("cannot change point data dimension count from " + dim.dataDimensionCount + " to " + dataDimensionCount + " for field=\"" + name + "\"");
+        }
+        if (dim.indexDimensionCount != indexDimensionCount) {
+          throw new IllegalArgumentException("cannot change point index dimension count from " + dim.indexDimensionCount + " to " + indexDimensionCount + " for field=\"" + name + "\"");
         }
         if (dim.dimensionNumBytes != dimensionNumBytes) {
           throw new IllegalArgumentException("cannot change point numBytes from " + dim.dimensionNumBytes + " to " + dimensionNumBytes + " for field=\"" + name + "\"");
@@ -395,15 +403,18 @@ public class FieldInfos implements Iterable<FieldInfo> {
       docValuesType.put(name, dvType);
     }
 
-    synchronized void setDimensions(int number, String name, int dimensionCount, int dimensionNumBytes) {
+    synchronized void setDimensions(int number, String name, int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
       if (dimensionNumBytes > PointValues.MAX_NUM_BYTES) {
         throw new IllegalArgumentException("dimension numBytes must be <= PointValues.MAX_NUM_BYTES (= " + PointValues.MAX_NUM_BYTES + "); got " + dimensionNumBytes + " for field=\"" + name + "\"");
       }
-      if (dimensionCount > PointValues.MAX_DIMENSIONS) {
-        throw new IllegalArgumentException("pointDimensionCount must be <= PointValues.MAX_DIMENSIONS (= " + PointValues.MAX_DIMENSIONS + "); got " + dimensionCount + " for field=\"" + name + "\"");
+      if (dataDimensionCount > PointValues.MAX_DIMENSIONS) {
+        throw new IllegalArgumentException("pointDataDimensionCount must be <= PointValues.MAX_DIMENSIONS (= " + PointValues.MAX_DIMENSIONS + "); got " + dataDimensionCount + " for field=\"" + name + "\"");
+      }
+      if (indexDimensionCount > dataDimensionCount) {
+        throw new IllegalArgumentException("pointIndexDimensionCount must be <= pointDataDimensionCount (= " + dataDimensionCount + "); got " + indexDimensionCount + " for field=\"" + name + "\"");
       }
-      verifyConsistentDimensions(number, name, dimensionCount, dimensionNumBytes);
-      dimensions.put(name, new FieldDimensions(dimensionCount, dimensionNumBytes));
+      verifyConsistentDimensions(number, name, dataDimensionCount, indexDimensionCount, dimensionNumBytes);
+      dimensions.put(name, new FieldDimensions(dataDimensionCount, indexDimensionCount, dimensionNumBytes));
     }
   }
   
@@ -438,8 +449,8 @@ public class FieldInfos implements Iterable<FieldInfo> {
         // before then we'll get the same name and number,
         // else we'll allocate a new one:
         final boolean isSoftDeletesField = name.equals(globalFieldNumbers.softDeletesFieldName);
-        final int fieldNumber = globalFieldNumbers.addOrGet(name, -1, IndexOptions.NONE, DocValuesType.NONE, 0, 0, isSoftDeletesField);
-        fi = new FieldInfo(name, fieldNumber, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, new HashMap<>(), 0, 0, isSoftDeletesField);
+        final int fieldNumber = globalFieldNumbers.addOrGet(name, -1, IndexOptions.NONE, DocValuesType.NONE, 0, 0, 0, isSoftDeletesField);
+        fi = new FieldInfo(name, fieldNumber, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, new HashMap<>(), 0, 0, 0, isSoftDeletesField);
         assert !byName.containsKey(fi.name);
         globalFieldNumbers.verifyConsistent(Integer.valueOf(fi.number), fi.name, DocValuesType.NONE);
         byName.put(fi.name, fi);
@@ -452,7 +463,8 @@ public class FieldInfos implements Iterable<FieldInfo> {
                                           boolean storeTermVector,
                                           boolean omitNorms, boolean storePayloads, IndexOptions indexOptions,
                                           DocValuesType docValues, long dvGen,
-                                          int dimensionCount, int dimensionNumBytes, boolean isSoftDeletesField) {
+                                          int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes,
+                                          boolean isSoftDeletesField) {
       assert assertNotFinished();
       if (docValues == null) {
         throw new NullPointerException("DocValuesType must not be null");
@@ -464,13 +476,13 @@ public class FieldInfos implements Iterable<FieldInfo> {
         // number for this field.  If the field was seen
         // before then we'll get the same name and number,
         // else we'll allocate a new one:
-        final int fieldNumber = globalFieldNumbers.addOrGet(name, preferredFieldNumber, indexOptions, docValues, dimensionCount, dimensionNumBytes, isSoftDeletesField);
-        fi = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, dvGen, new HashMap<>(), dimensionCount, dimensionNumBytes, isSoftDeletesField);
+        final int fieldNumber = globalFieldNumbers.addOrGet(name, preferredFieldNumber, indexOptions, docValues, dataDimensionCount, indexDimensionCount, dimensionNumBytes, isSoftDeletesField);
+        fi = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, dvGen, new HashMap<>(), dataDimensionCount, indexDimensionCount, dimensionNumBytes, isSoftDeletesField);
         assert !byName.containsKey(fi.name);
         globalFieldNumbers.verifyConsistent(Integer.valueOf(fi.number), fi.name, fi.getDocValuesType());
         byName.put(fi.name, fi);
       } else {
-        fi.update(storeTermVector, omitNorms, storePayloads, indexOptions, dimensionCount, dimensionNumBytes);
+        fi.update(storeTermVector, omitNorms, storePayloads, indexOptions, dataDimensionCount, indexDimensionCount, dimensionNumBytes);
 
         if (docValues != DocValuesType.NONE) {
           // Only pay the synchronization cost if fi does not already have a DVType
@@ -498,7 +510,8 @@ public class FieldInfos implements Iterable<FieldInfo> {
       return addOrUpdateInternal(fi.name, fi.number, fi.hasVectors(),
                                  fi.omitsNorms(), fi.hasPayloads(),
                                  fi.getIndexOptions(), fi.getDocValuesType(), dvGen,
-                                 fi.getPointDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField());
+                                 fi.getPointDataDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(),
+                                 fi.isSoftDeletesField());
     }
     
     public FieldInfo fieldInfo(String fieldName) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index db6ef9f..34424af 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -991,7 +991,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
     for(SegmentCommitInfo info : segmentInfos) {
       FieldInfos fis = readFieldInfos(info);
       for(FieldInfo fi : fis) {
-        map.addOrGet(fi.name, fi.number, fi.getIndexOptions(), fi.getDocValuesType(), fi.getPointDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField());
+        map.addOrGet(fi.name, fi.number, fi.getIndexOptions(), fi.getDocValuesType(), fi.getPointDataDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField());
       }
     }
 
@@ -1813,7 +1813,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
       if (globalFieldNumberMap.contains(f.name(), dvType) == false) {
         // if this field doesn't exists we try to add it. if it exists and the DV type doesn't match we
         // get a consistent error message as if you try to do that during an indexing operation.
-        globalFieldNumberMap.addOrGet(f.name(), -1, IndexOptions.NONE, dvType, 0, 0, f.name().equals(config.softDeletesField));
+        globalFieldNumberMap.addOrGet(f.name(), -1, IndexOptions.NONE, dvType, 0, 0, 0, f.name().equals(config.softDeletesField));
         assert globalFieldNumberMap.contains(f.name(), dvType);
       }
       if (config.getIndexSortFields().contains(f.name())) {
@@ -2849,7 +2849,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
             FieldInfos fis = readFieldInfos(info);
             for(FieldInfo fi : fis) {
               // This will throw exceptions if any of the incoming fields have an illegal schema change:
-              globalFieldNumberMap.addOrGet(fi.name, fi.number, fi.getIndexOptions(), fi.getDocValuesType(), fi.getPointDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField());
+              globalFieldNumberMap.addOrGet(fi.name, fi.number, fi.getIndexOptions(), fi.getDocValuesType(), fi.getPointDataDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(), fi.isSoftDeletesField());
             }
             infos.add(copySegmentAsIs(info, newSegName, context));
           }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
index a797cf8..b2b2e77 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
@@ -98,9 +98,14 @@ public interface IndexableFieldType {
   public DocValuesType docValuesType();
 
   /**
-   * If this is positive, the field is indexed as a point.
+   * If this is positive (representing the number of point data dimensions), the field is indexed as a point.
    */
-  public int pointDimensionCount();
+  public int pointDataDimensionCount();
+
+  /**
+   * The number of dimensions used for the index key
+   */
+  public int pointIndexDimensionCount();
 
   /**
    * The number of bytes in each dimension's values.

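The distinction between the two counts is easiest to see with a concrete layout. A purely hypothetical illustration (BoundingBoxDims is not a Lucene class), where every point stores four coordinates but only two of them steer the BKD tree:

final class BoundingBoxDims {
  // All four box coordinates are encoded in each point value...
  int pointDataDimensionCount()  { return 4; }   // minX, minY, maxX, maxY
  // ...but, in this hypothetical field, only the minimum corner drives index splits.
  int pointIndexDimensionCount() { return 2; }   // minX, minY
  int pointNumBytes()            { return Integer.BYTES; }
}
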
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/PointValues.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PointValues.java b/lucene/core/src/java/org/apache/lucene/index/PointValues.java
index df433d2..51b12d1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PointValues.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PointValues.java
@@ -136,7 +136,7 @@ public abstract class PointValues {
       if (minValue == null) {
         minValue = leafMinValue.clone();
       } else {
-        final int numDimensions = values.getNumDimensions();
+        final int numDimensions = values.getNumIndexDimensions();
         final int numBytesPerDimension = values.getBytesPerDimension();
         for (int i = 0; i < numDimensions; ++i) {
           int offset = i * numBytesPerDimension;
@@ -167,7 +167,7 @@ public abstract class PointValues {
       if (maxValue == null) {
         maxValue = leafMaxValue.clone();
       } else {
-        final int numDimensions = values.getNumDimensions();
+        final int numDimensions = values.getNumIndexDimensions();
         final int numBytesPerDimension = values.getBytesPerDimension();
         for (int i = 0; i < numDimensions; ++i) {
           int offset = i * numBytesPerDimension;
@@ -233,8 +233,11 @@ public abstract class PointValues {
   /** Returns maximum value for each dimension, packed, or null if {@link #size} is <code>0</code> */
   public abstract byte[] getMaxPackedValue() throws IOException;
 
-  /** Returns how many dimensions were indexed */
-  public abstract int getNumDimensions() throws IOException;
+  /** Returns how many data dimensions are represented in the values */
+  public abstract int getNumDataDimensions() throws IOException;
+
+  /** Returns how many dimensions are used for the index */
+  public abstract int getNumIndexDimensions() throws IOException;
 
   /** Returns the number of bytes per dimension */
   public abstract int getBytesPerDimension() throws IOException;

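Callers that used to size buffers from getNumDimensions() now have to pick the right count: leaf values carry all data dimensions, while getMinPackedValue()/getMaxPackedValue() only cover the index dimensions. A hedged consumer sketch (PointDimsProbe is illustrative, not part of the patch):

import java.io.IOException;

import org.apache.lucene.index.PointValues;

final class PointDimsProbe {
  /** Prints the buffer sizes implied by the data vs. index dimension counts. */
  static void describe(PointValues values) throws IOException {
    int dataDims = values.getNumDataDimensions();    // dimensions stored in each leaf value
    int indexDims = values.getNumIndexDimensions();  // dimensions used by the BKD index
    int bytesPerDim = values.getBytesPerDimension();
    byte[] leafValue = new byte[dataDims * bytesPerDim];    // packedBytesLength
    byte[] indexBound = new byte[indexDims * bytesPerDim];  // size of getMinPackedValue()/getMaxPackedValue()
    System.out.println("dataDims=" + dataDims + " indexDims=" + indexDims
        + " leafValueBytes=" + leafValue.length + " indexBoundBytes=" + indexBound.length);
  }
}
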
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java b/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
index 4aaf095..eb11ec3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
@@ -43,7 +43,7 @@ class PointValuesWriter {
     this.bytes = new ByteBlockPool(docWriter.byteBlockAllocator);
     docIDs = new int[16];
     iwBytesUsed.addAndGet(16 * Integer.BYTES);
-    packedBytesLength = fieldInfo.getPointDimensionCount() * fieldInfo.getPointNumBytes();
+    packedBytesLength = fieldInfo.getPointDataDimensionCount() * fieldInfo.getPointNumBytes();
   }
 
   // TODO: if exactly the same value is added to exactly the same doc, should we dedup?
@@ -52,7 +52,7 @@ class PointValuesWriter {
       throw new IllegalArgumentException("field=" + fieldInfo.name + ": point value must not be null");
     }
     if (value.length != packedBytesLength) {
-      throw new IllegalArgumentException("field=" + fieldInfo.name + ": this field's value has length=" + value.length + " but should be " + (fieldInfo.getPointDimensionCount() * fieldInfo.getPointNumBytes()));
+      throw new IllegalArgumentException("field=" + fieldInfo.name + ": this field's value has length=" + value.length + " but should be " + (fieldInfo.getPointDataDimensionCount() * fieldInfo.getPointNumBytes()));
     }
 
     if (docIDs.length == numPoints) {
@@ -106,7 +106,12 @@ class PointValuesWriter {
       }
 
       @Override
-      public int getNumDimensions() {
+      public int getNumDataDimensions() {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
+      public int getNumIndexDimensions() {
         throw new UnsupportedOperationException();
       }
 
@@ -229,8 +234,13 @@ class PointValuesWriter {
     }
 
     @Override
-    public int getNumDimensions() throws IOException {
-      return in.getNumDimensions();
+    public int getNumDataDimensions() throws IOException {
+      return in.getNumDataDimensions();
+    }
+
+    @Override
+    public int getNumIndexDimensions() throws IOException {
+      return in.getNumIndexDimensions();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
index 55e4d20..d9ed94b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -344,8 +344,13 @@ class SortingLeafReader extends FilterLeafReader {
     }
 
     @Override
-    public int getNumDimensions() throws IOException {
-      return in.getNumDimensions();
+    public int getNumDataDimensions() throws IOException {
+      return in.getNumDataDimensions();
+    }
+
+    @Override
+    public int getNumIndexDimensions() throws IOException {
+      return in.getNumIndexDimensions();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java
index eca38a3..487bdfb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointInSetQuery.java
@@ -123,8 +123,8 @@ public abstract class PointInSetQuery extends Query {
           return null;
         }
 
-        if (values.getNumDimensions() != numDims) {
-          throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numDims=" + values.getNumDimensions() + " but this query has numDims=" + numDims);
+        if (values.getNumIndexDimensions() != numDims) {
+          throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numIndexDims=" + values.getNumIndexDimensions() + " but this query has numIndexDims=" + numDims);
         }
         if (values.getBytesPerDimension() != bytesPerDim) {
           throw new IllegalArgumentException("field=\"" + field + "\" was indexed with bytesPerDim=" + values.getBytesPerDimension() + " but this query has bytesPerDim=" + bytesPerDim);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
index 031ca38..688a31f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
@@ -234,8 +234,8 @@ public abstract class PointRangeQuery extends Query {
           return null;
         }
 
-        if (values.getNumDimensions() != numDims) {
-          throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numDims=" + values.getNumDimensions() + " but this query has numDims=" + numDims);
+        if (values.getNumIndexDimensions() != numDims) {
+          throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numIndexDimensions=" + values.getNumIndexDimensions() + " but this query has numIndexDimensions=" + numDims);
         }
         if (bytesPerDim != values.getBytesPerDimension()) {
           throw new IllegalArgumentException("field=\"" + field + "\" was indexed with bytesPerDim=" + values.getBytesPerDimension() + " but this query has bytesPerDim=" + bytesPerDim);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
index 00a1d7d..3cbb054 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
@@ -35,7 +35,8 @@ import org.apache.lucene.util.MathUtil;
 public final class BKDReader extends PointValues implements Accountable {
   // Packed array of byte[] holding all split values in the full binary tree:
   final int leafNodeOffset;
-  final int numDims;
+  final int numDataDims;
+  final int numIndexDims;
   final int bytesPerDim;
   final int numLeaves;
   final IndexInput in;
@@ -46,29 +47,36 @@ public final class BKDReader extends PointValues implements Accountable {
   final int docCount;
   final int version;
   protected final int packedBytesLength;
+  protected final int packedIndexBytesLength;
 
   final byte[] packedIndex;
 
   /** Caller must pre-seek the provided {@link IndexInput} to the index location that {@link BKDWriter#finish} returned */
   public BKDReader(IndexInput in) throws IOException {
     version = CodecUtil.checkHeader(in, BKDWriter.CODEC_NAME, BKDWriter.VERSION_START, BKDWriter.VERSION_CURRENT);
-    numDims = in.readVInt();
+    numDataDims = in.readVInt();
+    if (version >= BKDWriter.VERSION_SELECTIVE_INDEXING) {
+      numIndexDims = in.readVInt();
+    } else {
+      numIndexDims = numDataDims;
+    }
     maxPointsInLeafNode = in.readVInt();
     bytesPerDim = in.readVInt();
-    packedBytesLength = numDims * bytesPerDim;
+    packedBytesLength = numDataDims * bytesPerDim;
+    packedIndexBytesLength = numIndexDims * bytesPerDim;
 
     // Read index:
     numLeaves = in.readVInt();
     assert numLeaves > 0;
     leafNodeOffset = numLeaves;
 
-    minPackedValue = new byte[packedBytesLength];
-    maxPackedValue = new byte[packedBytesLength];
+    minPackedValue = new byte[packedIndexBytesLength];
+    maxPackedValue = new byte[packedIndexBytesLength];
 
-    in.readBytes(minPackedValue, 0, packedBytesLength);
-    in.readBytes(maxPackedValue, 0, packedBytesLength);
+    in.readBytes(minPackedValue, 0, packedIndexBytesLength);
+    in.readBytes(maxPackedValue, 0, packedIndexBytesLength);
 
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numIndexDims;dim++) {
       if (FutureArrays.compareUnsigned(minPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim, maxPackedValue, dim * bytesPerDim, dim * bytesPerDim + bytesPerDim) > 0) {
         throw new CorruptIndexException("minPackedValue " + new BytesRef(minPackedValue) + " is > maxPackedValue " + new BytesRef(maxPackedValue) + " for dim=" + dim, in);
       }
@@ -122,16 +130,16 @@ public final class BKDReader extends PointValues implements Accountable {
       splitPackedValueStack = new byte[treeDepth+1][];
       nodeID = 1;
       level = 1;
-      splitPackedValueStack[level] = new byte[packedBytesLength];
+      splitPackedValueStack[level] = new byte[packedIndexBytesLength];
       leafBlockFPStack = new long[treeDepth+1];
       leftNodePositions = new int[treeDepth+1];
       rightNodePositions = new int[treeDepth+1];
       splitValuesStack = new byte[treeDepth+1][];
       splitDims = new int[treeDepth+1];
-      negativeDeltas = new boolean[numDims*(treeDepth+1)];
+      negativeDeltas = new boolean[numIndexDims*(treeDepth+1)];
 
       in = new ByteArrayDataInput(packedIndex);
-      splitValuesStack[0] = new byte[packedBytesLength];
+      splitValuesStack[0] = new byte[packedIndexBytesLength];
       readNodeData(false);
       scratch = new BytesRef();
       scratch.length = bytesPerDim;
@@ -142,11 +150,11 @@ public final class BKDReader extends PointValues implements Accountable {
       nodeID *= 2;
       level++;
       if (splitPackedValueStack[level] == null) {
-        splitPackedValueStack[level] = new byte[packedBytesLength];
+        splitPackedValueStack[level] = new byte[packedIndexBytesLength];
       }
-      System.arraycopy(negativeDeltas, (level-1)*numDims, negativeDeltas, level*numDims, numDims);
+      System.arraycopy(negativeDeltas, (level-1)*numIndexDims, negativeDeltas, level*numIndexDims, numIndexDims);
       assert splitDim != -1;
-      negativeDeltas[level*numDims+splitDim] = true;
+      negativeDeltas[level*numIndexDims+splitDim] = true;
       in.setPosition(nodePosition);
       readNodeData(true);
     }
@@ -162,7 +170,7 @@ public final class BKDReader extends PointValues implements Accountable {
       index.leftNodePositions[level] = leftNodePositions[level];
       index.rightNodePositions[level] = rightNodePositions[level];
       index.splitValuesStack[index.level] = splitValuesStack[index.level].clone();
-      System.arraycopy(negativeDeltas, level*numDims, index.negativeDeltas, level*numDims, numDims);
+      System.arraycopy(negativeDeltas, level*numIndexDims, index.negativeDeltas, level*numIndexDims, numIndexDims);
       index.splitDims[level] = splitDims[level];
       return index;
     }
@@ -172,11 +180,11 @@ public final class BKDReader extends PointValues implements Accountable {
       nodeID = nodeID * 2 + 1;
       level++;
       if (splitPackedValueStack[level] == null) {
-        splitPackedValueStack[level] = new byte[packedBytesLength];
+        splitPackedValueStack[level] = new byte[packedIndexBytesLength];
       }
-      System.arraycopy(negativeDeltas, (level-1)*numDims, negativeDeltas, level*numDims, numDims);
+      System.arraycopy(negativeDeltas, (level-1)*numIndexDims, negativeDeltas, level*numIndexDims, numIndexDims);
       assert splitDim != -1;
-      negativeDeltas[level*numDims+splitDim] = false;
+      negativeDeltas[level*numIndexDims+splitDim] = false;
       in.setPosition(nodePosition);
       readNodeData(false);
     }
@@ -276,19 +284,19 @@ public final class BKDReader extends PointValues implements Accountable {
 
         // read split dim, prefix, firstDiffByteDelta encoded as int:
         int code = in.readVInt();
-        splitDim = code % numDims;
+        splitDim = code % numIndexDims;
         splitDims[level] = splitDim;
-        code /= numDims;
+        code /= numIndexDims;
         int prefix = code % (1+bytesPerDim);
         int suffix = bytesPerDim - prefix;
 
         if (splitValuesStack[level] == null) {
-          splitValuesStack[level] = new byte[packedBytesLength];
+          splitValuesStack[level] = new byte[packedIndexBytesLength];
         }
-        System.arraycopy(splitValuesStack[level-1], 0, splitValuesStack[level], 0, packedBytesLength);
+        System.arraycopy(splitValuesStack[level-1], 0, splitValuesStack[level], 0, packedIndexBytesLength);
         if (suffix > 0) {
           int firstDiffByteDelta = code / (1+bytesPerDim);
-          if (negativeDeltas[level*numDims + splitDim]) {
+          if (negativeDeltas[level*numIndexDims + splitDim]) {
             firstDiffByteDelta = -firstDiffByteDelta;
           }
           int oldByte = splitValuesStack[level][splitDim*bytesPerDim+prefix] & 0xFF;
@@ -325,7 +333,7 @@ public final class BKDReader extends PointValues implements Accountable {
   public static final class IntersectState {
     final IndexInput in;
     final int[] scratchDocIDs;
-    final byte[] scratchPackedValue1, scratchPackedValue2;
+    final byte[] scratchDataPackedValue, scratchMinIndexPackedValue, scratchMaxIndexPackedValue;
     final int[] commonPrefixLengths;
 
     final IntersectVisitor visitor;
@@ -333,6 +341,7 @@ public final class BKDReader extends PointValues implements Accountable {
 
     public IntersectState(IndexInput in, int numDims,
                           int packedBytesLength,
+                          int packedIndexBytesLength,
                           int maxPointsInLeafNode,
                           IntersectVisitor visitor,
                           IndexTree indexVisitor) {
@@ -340,8 +349,9 @@ public final class BKDReader extends PointValues implements Accountable {
       this.visitor = visitor;
       this.commonPrefixLengths = new int[numDims];
       this.scratchDocIDs = new int[maxPointsInLeafNode];
-      this.scratchPackedValue1 = new byte[packedBytesLength];
-      this.scratchPackedValue2 = new byte[packedBytesLength];
+      this.scratchDataPackedValue = new byte[packedBytesLength];
+      this.scratchMinIndexPackedValue = new byte[packedIndexBytesLength];
+      this.scratchMaxIndexPackedValue = new byte[packedIndexBytesLength];
       this.index = indexVisitor;
     }
   }
@@ -389,8 +399,9 @@ public final class BKDReader extends PointValues implements Accountable {
   /** Create a new {@link IntersectState} */
   public IntersectState getIntersectState(IntersectVisitor visitor) {
     IndexTree index = new IndexTree();
-    return new IntersectState(in.clone(), numDims,
+    return new IntersectState(in.clone(), numDataDims,
                               packedBytesLength,
+                              packedIndexBytesLength,
                               maxPointsInLeafNode,
                               visitor,
                               index);
@@ -403,7 +414,7 @@ public final class BKDReader extends PointValues implements Accountable {
     int count = readDocIDs(state.in, index.getLeafBlockFP(), state.scratchDocIDs);
 
     // Again, this time reading values and checking with the visitor
-    visitDocValues(state.commonPrefixLengths, state.scratchPackedValue1, state.scratchPackedValue2, state.in, state.scratchDocIDs, count, state.visitor);
+    visitDocValues(state.commonPrefixLengths, state.scratchDataPackedValue, state.scratchMinIndexPackedValue, state.scratchMaxIndexPackedValue, state.in, state.scratchDocIDs, count, state.visitor);
   }
 
   private void visitDocIDs(IndexInput in, long blockFP, IntersectVisitor visitor) throws IOException {
@@ -428,17 +439,19 @@ public final class BKDReader extends PointValues implements Accountable {
     return count;
   }
 
-  void visitDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue1, byte[] scratchPackedValue2, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
+  void visitDocValues(int[] commonPrefixLengths, byte[] scratchDataPackedValue, byte[] scratchMinIndexPackedValue, byte[] scratchMaxIndexPackedValue,
+                      IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
 
 
-    readCommonPrefixes(commonPrefixLengths, scratchPackedValue1, in);
+    readCommonPrefixes(commonPrefixLengths, scratchDataPackedValue, in);
 
-    if (numDims != 1 && version >= BKDWriter.VERSION_LEAF_STORES_BOUNDS) {
-      byte[] minPackedValue = scratchPackedValue1;
-      byte[] maxPackedValue = scratchPackedValue2;
+    if (numIndexDims != 1 && version >= BKDWriter.VERSION_LEAF_STORES_BOUNDS) {
+      byte[] minPackedValue = scratchMinIndexPackedValue;
+      System.arraycopy(scratchDataPackedValue, 0, minPackedValue, 0, packedIndexBytesLength);
+      byte[] maxPackedValue = scratchMaxIndexPackedValue;
       //Copy common prefixes before reading adjusted
       // box
-      System.arraycopy(minPackedValue, 0, maxPackedValue, 0, packedBytesLength);
+      System.arraycopy(minPackedValue, 0, maxPackedValue, 0, packedIndexBytesLength);
       readMinMax(commonPrefixLengths, minPackedValue, maxPackedValue, in);
 
       // The index gives us range of values for each dimension, but the actual range of values
@@ -467,24 +480,24 @@ public final class BKDReader extends PointValues implements Accountable {
     int compressedDim = readCompressedDim(in);
 
     if (compressedDim == -1) {
-      visitRawDocValues(commonPrefixLengths, scratchPackedValue1, in, docIDs, count, visitor);
+      visitRawDocValues(commonPrefixLengths, scratchDataPackedValue, in, docIDs, count, visitor);
     } else {
-      visitCompressedDocValues(commonPrefixLengths, scratchPackedValue1, in, docIDs, count, visitor, compressedDim);
+      visitCompressedDocValues(commonPrefixLengths, scratchDataPackedValue, in, docIDs, count, visitor, compressedDim);
     }
   }
 
-    private void readMinMax(int[] commonPrefixLengths, byte[] minPackedValue, byte[] maxPackedValue, IndexInput in) throws IOException {
-      for (int dim = 0; dim < numDims; dim++) {
-        int prefix = commonPrefixLengths[dim];
-        in.readBytes(minPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
-        in.readBytes(maxPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
-      }
+  private void readMinMax(int[] commonPrefixLengths, byte[] minPackedValue, byte[] maxPackedValue, IndexInput in) throws IOException {
+    for (int dim = 0; dim < numIndexDims; dim++) {
+      int prefix = commonPrefixLengths[dim];
+      in.readBytes(minPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
+      in.readBytes(maxPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
     }
+  }
 
   // Just read suffixes for every dimension
   private void visitRawDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor) throws IOException {
     for (int i = 0; i < count; ++i) {
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         int prefix = commonPrefixLengths[dim];
         in.readBytes(scratchPackedValue, dim*bytesPerDim + prefix, bytesPerDim - prefix);
       }
@@ -502,7 +515,7 @@ public final class BKDReader extends PointValues implements Accountable {
       scratchPackedValue[compressedByteOffset] = in.readByte();
       final int runLen = Byte.toUnsignedInt(in.readByte());
       for (int j = 0; j < runLen; ++j) {
-        for(int dim=0;dim<numDims;dim++) {
+        for(int dim=0;dim<numDataDims;dim++) {
           int prefix = commonPrefixLengths[dim];
           in.readBytes(scratchPackedValue, dim*bytesPerDim + prefix, bytesPerDim - prefix);
         }
@@ -517,14 +530,14 @@ public final class BKDReader extends PointValues implements Accountable {
 
   private int readCompressedDim(IndexInput in) throws IOException {
     int compressedDim = in.readByte();
-    if (compressedDim < -1 || compressedDim >= numDims) {
+    if (compressedDim < -1 || compressedDim >= numDataDims) {
       throw new CorruptIndexException("Got compressedDim="+compressedDim, in);
     }
     return compressedDim;
   }
 
   private void readCommonPrefixes(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in) throws IOException {
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numDataDims;dim++) {
       int prefix = in.readVInt();
       commonPrefixLengths[dim] = prefix;
       if (prefix > 0) {
@@ -561,15 +574,15 @@ public final class BKDReader extends PointValues implements Accountable {
         int count = readDocIDs(state.in, state.index.getLeafBlockFP(), state.scratchDocIDs);
 
         // Again, this time reading values and checking with the visitor
-        visitDocValues(state.commonPrefixLengths, state.scratchPackedValue1, state.scratchPackedValue2, state.in, state.scratchDocIDs, count, state.visitor);
+        visitDocValues(state.commonPrefixLengths, state.scratchDataPackedValue, state.scratchMinIndexPackedValue, state.scratchMaxIndexPackedValue, state.in, state.scratchDocIDs, count, state.visitor);
       }
 
     } else {
       
       // Non-leaf node: recurse on the split left and right nodes
       int splitDim = state.index.getSplitDim();
-      assert splitDim >= 0: "splitDim=" + splitDim;
-      assert splitDim < numDims;
+      assert splitDim >= 0: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims;
+      assert splitDim < numIndexDims: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims;
 
       byte[] splitPackedValue = state.index.getSplitPackedValue();
       BytesRef splitDimValue = state.index.getSplitDimValue();
@@ -577,11 +590,11 @@ public final class BKDReader extends PointValues implements Accountable {
       //System.out.println("  splitDimValue=" + splitDimValue + " splitDim=" + splitDim);
 
       // make sure cellMin <= splitValue <= cellMax:
-      assert FutureArrays.compareUnsigned(cellMinPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) <= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numDims=" + numDims;
-      assert FutureArrays.compareUnsigned(cellMaxPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) >= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numDims=" + numDims;
+      assert FutureArrays.compareUnsigned(cellMinPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) <= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims;
+      assert FutureArrays.compareUnsigned(cellMaxPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) >= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims;
 
       // Recurse on left sub-tree:
-      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       state.index.pushLeft();
       intersect(state, cellMinPacked, splitPackedValue);
@@ -591,7 +604,7 @@ public final class BKDReader extends PointValues implements Accountable {
       System.arraycopy(splitPackedValue, splitDim*bytesPerDim, splitDimValue.bytes, splitDimValue.offset, bytesPerDim);
 
       // Recurse on right sub-tree:
-      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       state.index.pushRight();
       intersect(state, splitPackedValue, cellMaxPacked);
@@ -622,8 +635,8 @@ public final class BKDReader extends PointValues implements Accountable {
       
       // Non-leaf node: recurse on the split left and right nodes
       int splitDim = state.index.getSplitDim();
-      assert splitDim >= 0: "splitDim=" + splitDim;
-      assert splitDim < numDims;
+      assert splitDim >= 0: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims;
+      assert splitDim < numIndexDims: "splitDim=" + splitDim + ", numIndexDims=" + numIndexDims;
 
       byte[] splitPackedValue = state.index.getSplitPackedValue();
       BytesRef splitDimValue = state.index.getSplitDimValue();
@@ -631,11 +644,11 @@ public final class BKDReader extends PointValues implements Accountable {
       //System.out.println("  splitDimValue=" + splitDimValue + " splitDim=" + splitDim);
 
       // make sure cellMin <= splitValue <= cellMax:
-      assert FutureArrays.compareUnsigned(cellMinPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) <= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numDims=" + numDims;
-      assert FutureArrays.compareUnsigned(cellMaxPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) >= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numDims=" + numDims;
+      assert FutureArrays.compareUnsigned(cellMinPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) <= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims;
+      assert FutureArrays.compareUnsigned(cellMaxPacked, splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim, splitDimValue.bytes, splitDimValue.offset, splitDimValue.offset + bytesPerDim) >= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numIndexDims=" + numIndexDims + " numDataDims=" + numDataDims;
 
       // Recurse on left sub-tree:
-      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       state.index.pushLeft();
       final long leftCost = estimatePointCount(state, cellMinPacked, splitPackedValue);
@@ -645,7 +658,7 @@ public final class BKDReader extends PointValues implements Accountable {
       System.arraycopy(splitPackedValue, splitDim*bytesPerDim, splitDimValue.bytes, splitDimValue.offset, bytesPerDim);
 
       // Recurse on right sub-tree:
-      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       state.index.pushRight();
       final long rightCost = estimatePointCount(state, splitPackedValue, cellMaxPacked);
@@ -670,8 +683,13 @@ public final class BKDReader extends PointValues implements Accountable {
   }
 
   @Override
-  public int getNumDimensions() {
-    return numDims;
+  public int getNumDataDimensions() {
+    return numDataDims;
+  }
+
+  @Override
+  public int getNumIndexDimensions() {
+    return numIndexDims;
   }
 
   @Override
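
The two accessors above replace the old getNumDimensions() on PointValues. As a small consumer-side sketch (the leaf reader and field name below are assumptions, not part of this patch), both counts plus the per-dimension width can be read back from a segment's point values:

    import java.io.IOException;

    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.PointValues;

    public class PointDimsSketch {
      /** Prints the data/index dimension counts for a point field, if present. */
      static void printPointDims(LeafReader leafReader, String field) throws IOException {
        PointValues values = leafReader.getPointValues(field);
        if (values == null) {
          return; // no points indexed for this field in this segment
        }
        System.out.println(field
            + ": dataDims=" + values.getNumDataDimensions()
            + ", indexDims=" + values.getNumIndexDimensions()
            + ", bytesPerDim=" + values.getBytesPerDimension());
      }
    }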


[04/50] [abbrv] lucene-solr:jira/http2: LUCENE-8496: Selective indexing - modify BKDReader/BKDWriter to allow users to select a fewer number of dimensions to be used for creating the index than the total number of dimensions used for field encoding. i.e.

Posted by da...@apache.org.
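
A minimal sketch of what selective indexing looks like from the document side, based only on the FieldType changes included later in this patch; the surrounding document creation and indexing are assumed and omitted. A point field can now encode more data dimensions per value than it uses to build the BKD index:

    import org.apache.lucene.document.FieldType;

    public class SelectiveDimsSketch {
      public static void main(String[] args) {
        FieldType ft = new FieldType();
        // 4 data dimensions of 8 bytes each are stored per point value, but only
        // the first 2 dimensions are used to build the index structure.
        ft.setDimensions(4, 2, Long.BYTES);
        // Expected to print something like:
        // pointDataDimensionCount=4,pointIndexDimensionCount=2,pointNumBytes=8
        System.out.println(ft);
      }
    }
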
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
index 014d470..d7db94b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
@@ -86,7 +86,8 @@ public class BKDWriter implements Closeable {
   public static final int VERSION_START = 4; // version used by Lucene 7.0
   //public static final int VERSION_CURRENT = VERSION_START;
   public static final int VERSION_LEAF_STORES_BOUNDS = 5;
-  public static final int VERSION_CURRENT = VERSION_LEAF_STORES_BOUNDS;
+  public static final int VERSION_SELECTIVE_INDEXING = 6;
+  public static final int VERSION_CURRENT = VERSION_SELECTIVE_INDEXING;
 
   /** How many bytes each docs takes in the fixed-width offline format */
   private final int bytesPerDoc;
@@ -100,15 +101,21 @@ public class BKDWriter implements Closeable {
   /** Maximum number of dimensions */
   public static final int MAX_DIMS = 8;
 
-  /** How many dimensions we are indexing */
-  protected final int numDims;
+  /** How many dimensions we are storing at the leaf (data) nodes */
+  protected final int numDataDims;
+
+  /** How many dimensions we are indexing in the internal nodes */
+  protected final int numIndexDims;
 
   /** How many bytes each value in each dimension takes. */
   protected final int bytesPerDim;
 
-  /** numDims * bytesPerDim */
+  /** numDataDims * bytesPerDim */
   protected final int packedBytesLength;
 
+  /** numIndexDims * bytesPerDim */
+  protected final int packedIndexBytesLength;
+
   final TrackingDirectoryWrapper tempDir;
   final String tempFileNamePrefix;
   final double maxMBSortInHeap;
@@ -154,37 +161,39 @@ public class BKDWriter implements Closeable {
 
   private final int maxDoc;
 
-  public BKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDims, int bytesPerDim,
+  public BKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDataDims, int numIndexDims, int bytesPerDim,
                    int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount, boolean singleValuePerDoc) throws IOException {
-    this(maxDoc, tempDir, tempFileNamePrefix, numDims, bytesPerDim, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount, singleValuePerDoc,
+    this(maxDoc, tempDir, tempFileNamePrefix, numDataDims, numIndexDims, bytesPerDim, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount, singleValuePerDoc,
          totalPointCount > Integer.MAX_VALUE, Math.max(1, (long) maxMBSortInHeap), OfflineSorter.MAX_TEMPFILES);
   }
 
-  protected BKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDims, int bytesPerDim,
+  protected BKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDataDims, int numIndexDims, int bytesPerDim,
                       int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount,
                       boolean singleValuePerDoc, boolean longOrds, long offlineSorterBufferMB, int offlineSorterMaxTempFiles) throws IOException {
-    verifyParams(numDims, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount);
+    verifyParams(numDataDims, numIndexDims, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount);
     // We use tracking dir to deal with removing files on exception, so each place that
     // creates temp files doesn't need crazy try/finally/success logic:
     this.tempDir = new TrackingDirectoryWrapper(tempDir);
     this.tempFileNamePrefix = tempFileNamePrefix;
     this.maxPointsInLeafNode = maxPointsInLeafNode;
-    this.numDims = numDims;
+    this.numDataDims = numDataDims;
+    this.numIndexDims = numIndexDims;
     this.bytesPerDim = bytesPerDim;
     this.totalPointCount = totalPointCount;
     this.maxDoc = maxDoc;
     this.offlineSorterBufferMB = OfflineSorter.BufferSize.megabytes(offlineSorterBufferMB);
     this.offlineSorterMaxTempFiles = offlineSorterMaxTempFiles;
     docsSeen = new FixedBitSet(maxDoc);
-    packedBytesLength = numDims * bytesPerDim;
+    packedBytesLength = numDataDims * bytesPerDim;
+    packedIndexBytesLength = numIndexDims * bytesPerDim;
 
     scratchDiff = new byte[bytesPerDim];
     scratch1 = new byte[packedBytesLength];
     scratch2 = new byte[packedBytesLength];
-    commonPrefixLengths = new int[numDims];
+    commonPrefixLengths = new int[numDataDims];
 
-    minPackedValue = new byte[packedBytesLength];
-    maxPackedValue = new byte[packedBytesLength];
+    minPackedValue = new byte[packedIndexBytesLength];
+    maxPackedValue = new byte[packedIndexBytesLength];
 
     // If we may have more than 1+Integer.MAX_VALUE values, then we must encode ords with long (8 bytes), else we can use int (4 bytes).
     this.longOrds = longOrds;
@@ -211,7 +220,7 @@ public class BKDWriter implements Closeable {
     // bytes to points here.  Each dimension has its own sorted partition, so
     // we must divide by numDims as well.
 
-    maxPointsSortInHeap = (int) (0.5 * (maxMBSortInHeap * 1024 * 1024) / (bytesPerDoc * numDims));
+    maxPointsSortInHeap = (int) (0.5 * (maxMBSortInHeap * 1024 * 1024) / (bytesPerDoc * numDataDims));
 
     // Finally, we must be able to hold at least the leaf node in heap during build:
     if (maxPointsSortInHeap < maxPointsInLeafNode) {
@@ -224,11 +233,14 @@ public class BKDWriter implements Closeable {
     this.maxMBSortInHeap = maxMBSortInHeap;
   }
 
-  public static void verifyParams(int numDims, int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) {
+  public static void verifyParams(int numDataDims, int numIndexDims, int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) {
     // We encode dim in a single byte in the splitPackedValues, but we only expose 4 bits for it now, in case we want to use
     // remaining 4 bits for another purpose later
-    if (numDims < 1 || numDims > MAX_DIMS) {
-      throw new IllegalArgumentException("numDims must be 1 .. " + MAX_DIMS + " (got: " + numDims + ")");
+    if (numDataDims < 1 || numDataDims > MAX_DIMS) {
+      throw new IllegalArgumentException("numDataDims must be 1 .. " + MAX_DIMS + " (got: " + numDataDims + ")");
+    }
+    if (numIndexDims < 1 || numIndexDims > numDataDims) {
+      throw new IllegalArgumentException("numIndexDims must be 1 .. " + numDataDims + " (got: " + numIndexDims + ")");
     }
     if (maxPointsInLeafNode <= 0) {
       throw new IllegalArgumentException("maxPointsInLeafNode must be > 0; got " + maxPointsInLeafNode);
@@ -277,10 +289,10 @@ public class BKDWriter implements Closeable {
 
     // TODO: we could specialize for the 1D case:
     if (pointCount == 0) {
-      System.arraycopy(packedValue, 0, minPackedValue, 0, packedBytesLength);
-      System.arraycopy(packedValue, 0, maxPackedValue, 0, packedBytesLength);
+      System.arraycopy(packedValue, 0, minPackedValue, 0, packedIndexBytesLength);
+      System.arraycopy(packedValue, 0, maxPackedValue, 0, packedIndexBytesLength);
     } else {
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         int offset = dim*bytesPerDim;
         if (FutureArrays.compareUnsigned(packedValue, offset, offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
           System.arraycopy(packedValue, offset, minPackedValue, offset, bytesPerDim);
@@ -325,8 +337,9 @@ public class BKDWriter implements Closeable {
     public MergeReader(BKDReader bkd, MergeState.DocMap docMap) throws IOException {
       this.bkd = bkd;
       state = new BKDReader.IntersectState(bkd.in.clone(),
-                                           bkd.numDims,
+                                           bkd.numDataDims,
                                            bkd.packedBytesLength,
+                                           bkd.packedIndexBytesLength,
                                            bkd.maxPointsInLeafNode,
                                            null,
                                            null);
@@ -347,7 +360,7 @@ public class BKDWriter implements Closeable {
           docsInBlock = bkd.readDocIDs(state.in, state.in.getFilePointer(), state.scratchDocIDs);
           assert docsInBlock > 0;
           docBlockUpto = 0;
-          bkd.visitDocValues(state.commonPrefixLengths, state.scratchPackedValue1, state.scratchPackedValue2, state.in, state.scratchDocIDs, docsInBlock, new IntersectVisitor() {
+          bkd.visitDocValues(state.commonPrefixLengths, state.scratchDataPackedValue, state.scratchMinIndexPackedValue, state.scratchMaxIndexPackedValue, state.in, state.scratchDocIDs, docsInBlock, new IntersectVisitor() {
             int i = 0;
 
             @Override
@@ -385,7 +398,7 @@ public class BKDWriter implements Closeable {
         if (mappedDocID != -1) {
           // Not deleted!
           docID = mappedDocID;
-          System.arraycopy(packedValues, index * bkd.packedBytesLength, state.scratchPackedValue1, 0, bkd.packedBytesLength);
+          System.arraycopy(packedValues, index * bkd.packedBytesLength, state.scratchDataPackedValue, 0, bkd.packedBytesLength);
           return true;
         }
       }
@@ -404,7 +417,7 @@ public class BKDWriter implements Closeable {
     public boolean lessThan(MergeReader a, MergeReader b) {
       assert a != b;
 
-      int cmp = FutureArrays.compareUnsigned(a.state.scratchPackedValue1, 0, bytesPerDim, b.state.scratchPackedValue1, 0, bytesPerDim);
+      int cmp = FutureArrays.compareUnsigned(a.state.scratchDataPackedValue, 0, bytesPerDim, b.state.scratchDataPackedValue, 0, bytesPerDim);
       if (cmp < 0) {
         return true;
       } else if (cmp > 0) {
@@ -422,7 +435,7 @@ public class BKDWriter implements Closeable {
    *  disk. This method does not use transient disk in order to reorder points.
    */
   public long writeField(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
-    if (numDims == 1) {
+    if (numIndexDims == 1) {
       return writeField1Dim(out, fieldName, reader);
     } else {
       return writeFieldNDims(out, fieldName, reader);
@@ -465,7 +478,7 @@ public class BKDWriter implements Closeable {
     Arrays.fill(maxPackedValue, (byte) 0);
     for (int i = 0; i < Math.toIntExact(pointCount); ++i) {
       values.getValue(i, scratchBytesRef1);
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         int offset = dim*bytesPerDim;
         if (FutureArrays.compareUnsigned(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, scratchBytesRef1.offset + offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
           System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, minPackedValue, offset, bytesPerDim);
@@ -478,12 +491,12 @@ public class BKDWriter implements Closeable {
       docsSeen.set(values.getDocID(i));
     }
 
-    final int[] parentSplits = new int[numDims];
+    final int[] parentSplits = new int[numIndexDims];
     build(1, numLeaves, values, 0, Math.toIntExact(pointCount), out,
           minPackedValue, maxPackedValue, parentSplits,
           splitPackedValues, leafBlockFPs,
           new int[maxPointsInLeafNode]);
-    assert Arrays.equals(parentSplits, new int[numDims]);
+    assert Arrays.equals(parentSplits, new int[numIndexDims]);
 
     long indexFP = out.getFilePointer();
     writeIndex(out, Math.toIntExact(countPerLeaf), leafBlockFPs, splitPackedValues);
@@ -493,7 +506,7 @@ public class BKDWriter implements Closeable {
   /* In the 1D case, we can simply sort points in ascending order and use the
    * same writing logic as we use at merge time. */
   private long writeField1Dim(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
-    MutablePointsReaderUtils.sort(maxDoc, packedBytesLength, reader, 0, Math.toIntExact(reader.size()));
+    MutablePointsReaderUtils.sort(maxDoc, packedIndexBytesLength, reader, 0, Math.toIntExact(reader.size()));
 
     final OneDimensionBKDWriter oneDimWriter = new OneDimensionBKDWriter(out);
 
@@ -546,7 +559,7 @@ public class BKDWriter implements Closeable {
       MergeReader reader = queue.top();
       // System.out.println("iter reader=" + reader);
 
-      oneDimWriter.add(reader.state.scratchPackedValue1, reader.docID);
+      oneDimWriter.add(reader.state.scratchDataPackedValue, reader.docID);
 
       if (reader.next()) {
         queue.updateTop();
@@ -573,8 +586,8 @@ public class BKDWriter implements Closeable {
     private int leafCount;
 
     OneDimensionBKDWriter(IndexOutput out) {
-      if (numDims != 1) {
-        throw new UnsupportedOperationException("numDims must be 1 but got " + numDims);
+      if (numIndexDims != 1) {
+        throw new UnsupportedOperationException("numIndexDims must be 1 but got " + numIndexDims);
       }
       if (pointCount != 0) {
         throw new IllegalStateException("cannot mix add and merge");
@@ -651,9 +664,9 @@ public class BKDWriter implements Closeable {
     private void writeLeafBlock() throws IOException {
       assert leafCount != 0;
       if (valueCount == 0) {
-        System.arraycopy(leafValues, 0, minPackedValue, 0, packedBytesLength);
+        System.arraycopy(leafValues, 0, minPackedValue, 0, packedIndexBytesLength);
       }
-      System.arraycopy(leafValues, (leafCount - 1) * packedBytesLength, maxPackedValue, 0, packedBytesLength);
+      System.arraycopy(leafValues, (leafCount - 1) * packedBytesLength, maxPackedValue, 0, packedIndexBytesLength);
 
       valueCount += leafCount;
 
@@ -828,7 +841,7 @@ public class BKDWriter implements Closeable {
   */
 
   private PointWriter sort(int dim) throws IOException {
-    assert dim >= 0 && dim < numDims;
+    assert dim >= 0 && dim < numDataDims;
 
     if (heapPointWriter != null) {
 
@@ -861,7 +874,7 @@ public class BKDWriter implements Closeable {
       final int offset = bytesPerDim * dim;
 
       Comparator<BytesRef> cmp;
-      if (dim == numDims - 1) {
+      if (dim == numDataDims - 1) {
         // in that case the bytes for the dimension and for the doc id are contiguous,
         // so we don't need a branch
         cmp = new BytesRefComparator(bytesPerDim + Integer.BYTES) {
@@ -946,7 +959,7 @@ public class BKDWriter implements Closeable {
     }
 
     LongBitSet ordBitSet;
-    if (numDims > 1) {
+    if (numDataDims > 1) {
       if (singleValuePerDoc) {
         ordBitSet = new LongBitSet(maxDoc);
       } else {
@@ -981,7 +994,7 @@ public class BKDWriter implements Closeable {
     assert pointCount / numLeaves <= maxPointsInLeafNode: "pointCount=" + pointCount + " numLeaves=" + numLeaves + " maxPointsInLeafNode=" + maxPointsInLeafNode;
 
     // Sort all docs once by each dimension:
-    PathSlice[] sortedPointWriters = new PathSlice[numDims];
+    PathSlice[] sortedPointWriters = new PathSlice[numDataDims];
 
     // This is only used on exception; on normal code paths we close all files we opened:
     List<Closeable> toCloseHeroically = new ArrayList<>();
@@ -989,7 +1002,9 @@ public class BKDWriter implements Closeable {
     boolean success = false;
     try {
       //long t0 = System.nanoTime();
-      for(int dim=0;dim<numDims;dim++) {
+      // even with selective indexing we create the sortedPointWriters so we can compress
+      // the leaf node data by common prefix
+      for(int dim=0;dim<numDataDims;dim++) {
         sortedPointWriters[dim] = new PathSlice(sort(dim), 0, pointCount);
       }
       //long t1 = System.nanoTime();
@@ -1003,7 +1018,7 @@ public class BKDWriter implements Closeable {
         heapPointWriter = null;
       }
 
-      final int[] parentSplits = new int[numDims];
+      final int[] parentSplits = new int[numIndexDims];
       build(1, numLeaves, sortedPointWriters,
             ordBitSet, out,
             minPackedValue, maxPackedValue,
@@ -1011,7 +1026,7 @@ public class BKDWriter implements Closeable {
             splitPackedValues,
             leafBlockFPs,
             toCloseHeroically);
-      assert Arrays.equals(parentSplits, new int[numDims]);
+      assert Arrays.equals(parentSplits, new int[numIndexDims]);
 
       for(PathSlice slice : sortedPointWriters) {
         slice.writer.destroy();
@@ -1046,7 +1061,7 @@ public class BKDWriter implements Closeable {
     // Possibly rotate the leaf block FPs, if the index is not a fully balanced binary tree (only happens
     // if it was created by OneDimensionBKDWriter).  In this case the leaf nodes may straddle the two bottom
     // levels of the binary tree:
-    if (numDims == 1 && numLeaves > 1) {
+    if (numIndexDims == 1 && numLeaves > 1) {
       int levelCount = 2;
       while (true) {
         if (numLeaves >= levelCount && numLeaves <= 2*levelCount) {
@@ -1072,9 +1087,9 @@ public class BKDWriter implements Closeable {
 
     // This is the "file" we append the byte[] to:
     List<byte[]> blocks = new ArrayList<>();
-    byte[] lastSplitValues = new byte[bytesPerDim * numDims];
+    byte[] lastSplitValues = new byte[bytesPerDim * numIndexDims];
     //System.out.println("\npack index");
-    int totalSize = recursePackIndex(writeBuffer, leafBlockFPs, splitPackedValues, 0l, blocks, 1, lastSplitValues, new boolean[numDims], false);
+    int totalSize = recursePackIndex(writeBuffer, leafBlockFPs, splitPackedValues, 0l, blocks, 1, lastSplitValues, new boolean[numIndexDims], false);
 
     // Compact the byte[] blocks into single byte index:
     byte[] index = new byte[totalSize];
@@ -1126,7 +1141,7 @@ public class BKDWriter implements Closeable {
       if (isLeft == false) {
         leftBlockFP = getLeftMostLeafBlockFP(leafBlockFPs, nodeID);
         long delta = leftBlockFP - minBlockFP;
-        assert nodeID == 1 || delta > 0;
+        assert nodeID == 1 || delta > 0 : "expected nodeID=1 or delta > 0; got nodeID=" + nodeID + " and delta=" + delta;
         writeBuffer.writeVLong(delta);
       } else {
         // The left tree's left most leaf block FP is always the minimal FP:
@@ -1162,7 +1177,7 @@ public class BKDWriter implements Closeable {
       }
 
       // pack the prefix, splitDim and delta first diff byte into a single vInt:
-      int code = (firstDiffByteDelta * (1+bytesPerDim) + prefix) * numDims + splitDim;
+      int code = (firstDiffByteDelta * (1+bytesPerDim) + prefix) * numIndexDims + splitDim;
 
       //System.out.println("  code=" + code);
       //System.out.println("  splitValue=" + new BytesRef(splitPackedValues, address, bytesPerDim));
@@ -1248,14 +1263,15 @@ public class BKDWriter implements Closeable {
   private void writeIndex(IndexOutput out, int countPerLeaf, int numLeaves, byte[] packedIndex) throws IOException {
     
     CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
-    out.writeVInt(numDims);
+    out.writeVInt(numDataDims);
+    out.writeVInt(numIndexDims);
     out.writeVInt(countPerLeaf);
     out.writeVInt(bytesPerDim);
 
     assert numLeaves > 0;
     out.writeVInt(numLeaves);
-    out.writeBytes(minPackedValue, 0, packedBytesLength);
-    out.writeBytes(maxPackedValue, 0, packedBytesLength);
+    out.writeBytes(minPackedValue, 0, packedIndexBytesLength);
+    out.writeBytes(maxPackedValue, 0, packedIndexBytesLength);
 
     out.writeVLong(pointCount);
     out.writeVInt(docsSeen.cardinality());
@@ -1275,7 +1291,7 @@ public class BKDWriter implements Closeable {
       // all values in this block are equal
       out.writeByte((byte) -1);
     } else {
-      if (numDims != 1) {
+      if (numIndexDims != 1) {
         writeActualBounds(out, commonPrefixLengths, count, packedValues);
       }
       assert commonPrefixLengths[sortedDim] < bytesPerDim;
@@ -1298,7 +1314,7 @@ public class BKDWriter implements Closeable {
   }
 
   private void writeActualBounds(DataOutput out, int[] commonPrefixLengths, int count, IntFunction<BytesRef> packedValues) throws IOException {
-    for (int dim = 0; dim < numDims; ++dim) {
+    for (int dim = 0; dim < numIndexDims; ++dim) {
       int commonPrefixLength = commonPrefixLengths[dim];
       int suffixLength = bytesPerDim - commonPrefixLength;
       if (suffixLength > 0) {
@@ -1336,7 +1352,7 @@ public class BKDWriter implements Closeable {
       BytesRef ref = packedValues.apply(i);
       assert ref.length == packedBytesLength;
 
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         int prefix = commonPrefixLengths[dim];
         out.writeBytes(ref.bytes, ref.offset + dim*bytesPerDim + prefix, bytesPerDim-prefix);
       }
@@ -1358,7 +1374,7 @@ public class BKDWriter implements Closeable {
   }
 
   private void writeCommonPrefixes(DataOutput out, int[] commonPrefixes, byte[] packedValue) throws IOException {
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numDataDims;dim++) {
       out.writeVInt(commonPrefixes[dim]);
       //System.out.println(commonPrefixes[dim] + " of " + bytesPerDim);
       out.writeBytes(packedValue, dim*bytesPerDim, commonPrefixes[dim]);
@@ -1429,7 +1445,7 @@ public class BKDWriter implements Closeable {
       boolean result = reader.next();
       assert result: "rightCount=" + rightCount + " source.count=" + source.count + " source.writer=" + source.writer;
       System.arraycopy(reader.packedValue(), splitDim*bytesPerDim, scratch1, 0, bytesPerDim);
-      if (numDims > 1) {
+      if (numDataDims > 1) {
         assert ordBitSet.get(reader.ord()) == false;
         ordBitSet.set(reader.ord());
         // Subtract 1 from rightCount because we already did the first value above (so we could record the split value):
@@ -1444,7 +1460,7 @@ public class BKDWriter implements Closeable {
 
   /** Called only in assert */
   private boolean valueInBounds(BytesRef packedValue, byte[] minPackedValue, byte[] maxPackedValue) {
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numIndexDims;dim++) {
       int offset = bytesPerDim*dim;
       if (FutureArrays.compareUnsigned(packedValue.bytes, packedValue.offset + offset, packedValue.offset + offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
         return false;
@@ -1472,7 +1488,7 @@ public class BKDWriter implements Closeable {
     for (int numSplits : parentSplits) {
       maxNumSplits = Math.max(maxNumSplits, numSplits);
     }
-    for (int dim = 0; dim < numDims; ++dim) {
+    for (int dim = 0; dim < numIndexDims; ++dim) {
       final int offset = dim * bytesPerDim;
       if (parentSplits[dim] < maxNumSplits / 2 &&
           FutureArrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, maxPackedValue, offset, offset + bytesPerDim) != 0) {
@@ -1482,7 +1498,7 @@ public class BKDWriter implements Closeable {
 
     // Find which dim has the largest span so we can split on it:
     int splitDim = -1;
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numIndexDims;dim++) {
       NumericUtils.subtract(bytesPerDim, dim, maxPackedValue, minPackedValue, scratchDiff);
       if (splitDim == -1 || FutureArrays.compareUnsigned(scratchDiff, 0, bytesPerDim, scratch1, 0, bytesPerDim) > 0) {
         System.arraycopy(scratchDiff, 0, scratch1, 0, bytesPerDim);
@@ -1532,7 +1548,7 @@ public class BKDWriter implements Closeable {
       reader.getValue(from, scratchBytesRef1);
       for (int i = from + 1; i < to; ++i) {
         reader.getValue(i, scratchBytesRef2);
-        for (int dim=0;dim<numDims;dim++) {
+        for (int dim=0;dim<numDataDims;dim++) {
           final int offset = dim * bytesPerDim;
           for(int j=0;j<commonPrefixLengths[dim];j++) {
             if (scratchBytesRef1.bytes[scratchBytesRef1.offset+offset+j] != scratchBytesRef2.bytes[scratchBytesRef2.offset+offset+j]) {
@@ -1544,14 +1560,14 @@ public class BKDWriter implements Closeable {
       }
 
       // Find the dimension that has the least number of unique bytes at commonPrefixLengths[dim]
-      FixedBitSet[] usedBytes = new FixedBitSet[numDims];
-      for (int dim = 0; dim < numDims; ++dim) {
+      FixedBitSet[] usedBytes = new FixedBitSet[numDataDims];
+      for (int dim = 0; dim < numDataDims; ++dim) {
         if (commonPrefixLengths[dim] < bytesPerDim) {
           usedBytes[dim] = new FixedBitSet(256);
         }
       }
       for (int i = from + 1; i < to; ++i) {
-        for (int dim=0;dim<numDims;dim++) {
+        for (int dim=0;dim<numDataDims;dim++) {
           if (usedBytes[dim] != null) {
             byte b = reader.getByteAt(i, dim * bytesPerDim + commonPrefixLengths[dim]);
             usedBytes[dim].set(Byte.toUnsignedInt(b));
@@ -1560,7 +1576,7 @@ public class BKDWriter implements Closeable {
       }
       int sortedDim = 0;
       int sortedDimCardinality = Integer.MAX_VALUE;
-      for (int dim = 0; dim < numDims; ++dim) {
+      for (int dim = 0; dim < numDataDims; ++dim) {
         if (usedBytes[dim] != null) {
           final int cardinality = usedBytes[dim].cardinality();
           if (cardinality < sortedDimCardinality) {
@@ -1631,8 +1647,8 @@ public class BKDWriter implements Closeable {
       reader.getValue(mid, scratchBytesRef1);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, splitPackedValues, address + 1, bytesPerDim);
 
-      byte[] minSplitPackedValue = ArrayUtil.copyOfSubArray(minPackedValue, 0, packedBytesLength);
-      byte[] maxSplitPackedValue = ArrayUtil.copyOfSubArray(maxPackedValue, 0, packedBytesLength);
+      byte[] minSplitPackedValue = ArrayUtil.copyOfSubArray(minPackedValue, 0, packedIndexBytesLength);
+      byte[] maxSplitPackedValue = ArrayUtil.copyOfSubArray(maxPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
           minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
@@ -1666,7 +1682,7 @@ public class BKDWriter implements Closeable {
       assert slice.count == slices[0].count;
     }
     
-    if (numDims == 1 && slices[0].writer instanceof OfflinePointWriter && slices[0].count <= maxPointsSortInHeap) {
+    if (numDataDims == 1 && slices[0].writer instanceof OfflinePointWriter && slices[0].count <= maxPointsSortInHeap) {
       // Special case for 1D, to cutover to heap once we recurse deeply enough:
       slices[0] = switchToHeap(slices[0], toCloseHeroically);
     }
@@ -1679,7 +1695,7 @@ public class BKDWriter implements Closeable {
       int sortedDim = 0;
       int sortedDimCardinality = Integer.MAX_VALUE;
 
-      for (int dim=0;dim<numDims;dim++) {
+      for (int dim=0;dim<numDataDims;dim++) {
         if (slices[dim].writer instanceof HeapPointWriter == false) {
           // Adversarial cases can cause this, e.g. very lopsided data, all equal points, such that we started
           // offline, but then kept splitting only in one dimension, and so never had to rewrite into heap writer
@@ -1767,7 +1783,7 @@ public class BKDWriter implements Closeable {
       // Inner node: partition/recurse
 
       int splitDim;
-      if (numDims > 1) {
+      if (numIndexDims > 1) {
         splitDim = split(minPackedValue, maxPackedValue, parentSplits);
       } else {
         splitDim = 0;
@@ -1788,24 +1804,24 @@ public class BKDWriter implements Closeable {
 
       // Partition all PathSlice that are not the split dim into sorted left and right sets, so we can recurse:
 
-      PathSlice[] leftSlices = new PathSlice[numDims];
-      PathSlice[] rightSlices = new PathSlice[numDims];
+      PathSlice[] leftSlices = new PathSlice[numDataDims];
+      PathSlice[] rightSlices = new PathSlice[numDataDims];
 
-      byte[] minSplitPackedValue = new byte[packedBytesLength];
-      System.arraycopy(minPackedValue, 0, minSplitPackedValue, 0, packedBytesLength);
+      byte[] minSplitPackedValue = new byte[packedIndexBytesLength];
+      System.arraycopy(minPackedValue, 0, minSplitPackedValue, 0, packedIndexBytesLength);
 
-      byte[] maxSplitPackedValue = new byte[packedBytesLength];
-      System.arraycopy(maxPackedValue, 0, maxSplitPackedValue, 0, packedBytesLength);
+      byte[] maxSplitPackedValue = new byte[packedIndexBytesLength];
+      System.arraycopy(maxPackedValue, 0, maxSplitPackedValue, 0, packedIndexBytesLength);
 
       // When we are on this dim, below, we clear the ordBitSet:
       int dimToClear;
-      if (numDims - 1 == splitDim) {
-        dimToClear = numDims - 2;
+      if (numDataDims - 1 == splitDim) {
+        dimToClear = numDataDims - 2;
       } else {
-        dimToClear = numDims - 1;
+        dimToClear = numDataDims - 1;
       }
 
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
 
         if (dim == splitDim) {
           // No need to partition on this dim since it's a simple slice of the incoming already sorted slice, and we
@@ -1842,7 +1858,7 @@ public class BKDWriter implements Closeable {
             ordBitSet, out,
             minPackedValue, maxSplitPackedValue, parentSplits,
             splitPackedValues, leafBlockFPs, toCloseHeroically);
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         // Don't destroy the dim we split on because we just re-used what our caller above gave us for that dim:
         if (dim != splitDim) {
           leftSlices[dim].writer.destroy();
@@ -1855,7 +1871,7 @@ public class BKDWriter implements Closeable {
             ordBitSet, out,
             minSplitPackedValue, maxPackedValue, parentSplits,
             splitPackedValues, leafBlockFPs, toCloseHeroically);
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         // Don't destroy the dim we split on because we just re-used what our caller above gave us for that dim:
         if (dim != splitDim) {
           rightSlices[dim].writer.destroy();
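
A rough usage sketch of the widened BKDWriter constructor above; the directory path, point counts, and sizes are placeholders, and the add()/finish() calls are omitted. packedBytesLength still covers every data dimension, while the min/max bounds and split values written into the index only cover packedIndexBytesLength bytes:

    import java.nio.file.Paths;

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.bkd.BKDWriter;

    public class BkdWriterSketch {
      public static void main(String[] args) throws Exception {
        int numDataDims = 4;          // dimensions stored at the leaves
        int numIndexDims = 2;         // dimensions used for splitting inner nodes
        int bytesPerDim = Long.BYTES; // 8 bytes per dimension
        // packedBytesLength      = numDataDims  * bytesPerDim = 32 bytes per value
        // packedIndexBytesLength = numIndexDims * bytesPerDim = 16 bytes per bound
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/bkd-sketch"))) {
          BKDWriter writer = new BKDWriter(
              1000, dir, "tmp",
              numDataDims, numIndexDims, bytesPerDim,
              BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE,
              BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP,
              1000, /* singleValuePerDoc */ true);
          // writer.add(packedValue, docID) calls and writer.finish(indexOutput)
          // would go here; each value must be numDataDims * bytesPerDim bytes long.
          writer.close();
        }
      }
    }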

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java b/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
index 9214cb9..b770e62 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
@@ -69,7 +69,7 @@ public class TestFieldType extends LuceneTestCase {
   public void testPointsToString() {
     FieldType ft = new FieldType();
     ft.setDimensions(1, Integer.BYTES);
-    assertEquals("pointDimensionCount=1,pointNumBytes=4", ft.toString());
+    assertEquals("pointDataDimensionCount=1,pointIndexDimensionCount=1,pointNumBytes=4", ft.toString());
   }
 
   private static Object randomValue(Class<?> clazz) {
@@ -85,15 +85,20 @@ public class TestFieldType extends LuceneTestCase {
 
   private static FieldType randomFieldType() throws Exception {
     // setDimensions handled special as values must be in-bounds.
-    Method setDimensionsMethod = FieldType.class.getMethod("setDimensions", int.class, int.class);
+    Method setDimensionsMethodA = FieldType.class.getMethod("setDimensions", int.class, int.class);
+    Method setDimensionsMethodB = FieldType.class.getMethod("setDimensions", int.class, int.class, int.class);
     FieldType ft = new FieldType();
     for (Method method : FieldType.class.getMethods()) {
       if (method.getName().startsWith("set")) {
         final Class<?>[] parameterTypes = method.getParameterTypes();
         final Object[] args = new Object[parameterTypes.length];
-        if (method.equals(setDimensionsMethod)) {
+        if (method.equals(setDimensionsMethodA)) {
           args[0] = 1 + random().nextInt(PointValues.MAX_DIMENSIONS);
           args[1] = 1 + random().nextInt(PointValues.MAX_NUM_BYTES);
+        } else if (method.equals(setDimensionsMethodB)) {
+          args[0] = 1 + random().nextInt(PointValues.MAX_DIMENSIONS);
+          args[1] = 1 + random().nextInt((Integer)args[0]);
+          args[2] = 1 + random().nextInt(PointValues.MAX_NUM_BYTES);
         } else {
           for (int i = 0; i < args.length; ++i) {
             args[i] = randomValue(parameterTypes[i]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index 25b48c8..1091b24 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -91,7 +91,12 @@ public class TestIndexableField extends LuceneTestCase {
       }
 
       @Override
-      public int pointDimensionCount() {
+      public int pointDataDimensionCount() {
+        return 0;
+      }
+
+      @Override
+      public int pointIndexDimensionCount() {
         return 0;
       }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java
index 7d03c7e..c882a3b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPendingSoftDeletes.java
@@ -164,7 +164,7 @@ public class TestPendingSoftDeletes extends TestPendingDeletes {
     deletes.onNewReader(segmentReader, commitInfo);
     reader.close();
     writer.close();
-    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, 0, Collections.emptyMap(), 0, 0, true);
+    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, 0, Collections.emptyMap(), 0, 0, 0, true);
     List<Integer> docsDeleted = Arrays.asList(1, 3, 7, 8, DocIdSetIterator.NO_MORE_DOCS);
     List<DocValuesFieldUpdates> updates = Arrays.asList(singleUpdate(docsDeleted, 10, true));
     for (DocValuesFieldUpdates update : updates) {
@@ -185,7 +185,7 @@ public class TestPendingSoftDeletes extends TestPendingDeletes {
 
     docsDeleted = Arrays.asList(1, 2, DocIdSetIterator.NO_MORE_DOCS);
     updates = Arrays.asList(singleUpdate(docsDeleted, 10, true));
-    fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, 1, Collections.emptyMap(), 0, 0, true);
+    fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, 1, Collections.emptyMap(), 0, 0, 0, true);
     for (DocValuesFieldUpdates update : updates) {
       deletes.onDocValuesUpdate(fieldInfo, update.iterator());
     }
@@ -228,7 +228,7 @@ public class TestPendingSoftDeletes extends TestPendingDeletes {
     SegmentCommitInfo segmentInfo = segmentReader.getSegmentInfo();
     PendingDeletes deletes = newPendingDeletes(segmentInfo);
     deletes.onNewReader(segmentReader, segmentInfo);
-    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, segmentInfo.getNextDocValuesGen(), Collections.emptyMap(), 0, 0, true);
+    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, segmentInfo.getNextDocValuesGen(), Collections.emptyMap(), 0, 0, 0, true);
     List<Integer> docsDeleted = Arrays.asList(1, DocIdSetIterator.NO_MORE_DOCS);
     List<DocValuesFieldUpdates> updates = Arrays.asList(singleUpdate(docsDeleted, 3, true));
     for (DocValuesFieldUpdates update : updates) {
@@ -276,7 +276,7 @@ public class TestPendingSoftDeletes extends TestPendingDeletes {
     SegmentCommitInfo segmentInfo = segmentReader.getSegmentInfo();
     PendingDeletes deletes = newPendingDeletes(segmentInfo);
     deletes.onNewReader(segmentReader, segmentInfo);
-    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, segmentInfo.getNextDocValuesGen(), Collections.emptyMap(), 0, 0, true);
+    FieldInfo fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, segmentInfo.getNextDocValuesGen(), Collections.emptyMap(), 0, 0, 0, true);
     List<DocValuesFieldUpdates> updates = Arrays.asList(singleUpdate(Arrays.asList(0, 1, DocIdSetIterator.NO_MORE_DOCS), 3, false));
     for (DocValuesFieldUpdates update : updates) {
       deletes.onDocValuesUpdate(fieldInfo, update.iterator());
@@ -295,7 +295,7 @@ public class TestPendingSoftDeletes extends TestPendingDeletes {
     assertEquals(0, deletes.numPendingDeletes());
 
     segmentInfo.advanceDocValuesGen();
-    fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, segmentInfo.getNextDocValuesGen(), Collections.emptyMap(), 0, 0, true);
+    fieldInfo = new FieldInfo("_soft_deletes", 1, false, false, false, IndexOptions.NONE, DocValuesType.NUMERIC, segmentInfo.getNextDocValuesGen(), Collections.emptyMap(), 0, 0, 0, true);
     updates = Arrays.asList(singleUpdate(Arrays.asList(1, DocIdSetIterator.NO_MORE_DOCS), 3, true));
     for (DocValuesFieldUpdates update : updates) {
       deletes.onDocValuesUpdate(fieldInfo, update.iterator());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/index/TestPointValues.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPointValues.java b/lucene/core/src/test/org/apache/lucene/index/TestPointValues.java
index 1ef7abc..ae8605d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPointValues.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPointValues.java
@@ -73,7 +73,7 @@ public class TestPointValues extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
       w.addDocument(doc);
     });
-    assertEquals("cannot change point dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
+    assertEquals("cannot change point data dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
     w.close();
     dir.close();
   }
@@ -91,7 +91,7 @@ public class TestPointValues extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
       w.addDocument(doc2);
     });
-    assertEquals("cannot change point dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
+    assertEquals("cannot change point data dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
 
     w.close();
     dir.close();
@@ -111,7 +111,7 @@ public class TestPointValues extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
       w.addDocument(doc2);
     });
-    assertEquals("cannot change point dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
+    assertEquals("cannot change point data dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
 
     w.close();
     dir.close();
@@ -133,7 +133,7 @@ public class TestPointValues extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
       w2.addDocument(doc2);
     });
-    assertEquals("cannot change point dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
+    assertEquals("cannot change point data dimension count from 1 to 2 for field=\"dim\"", expected.getMessage());
 
     w2.close();
     dir.close();
@@ -156,7 +156,7 @@ public class TestPointValues extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
       w2.addIndexes(new Directory[] {dir});
     });
-    assertEquals("cannot change point dimension count from 2 to 1 for field=\"dim\"", expected.getMessage());
+    assertEquals("cannot change point data dimension count from 2 to 1 for field=\"dim\"", expected.getMessage());
 
     IOUtils.close(w2, dir, dir2);
   }
@@ -179,7 +179,7 @@ public class TestPointValues extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
         w2.addIndexes(new CodecReader[] {(CodecReader) getOnlyLeafReader(r)});
     });
-    assertEquals("cannot change point dimension count from 2 to 1 for field=\"dim\"", expected.getMessage());
+    assertEquals("cannot change point data dimension count from 2 to 1 for field=\"dim\"", expected.getMessage());
 
     IOUtils.close(r, w2, dir, dir2);
   }
@@ -203,7 +203,7 @@ public class TestPointValues extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
       TestUtil.addIndexesSlowly(w2, r);
     });
-    assertEquals("cannot change point dimension count from 2 to 1 for field=\"dim\"", expected.getMessage());
+    assertEquals("cannot change point data dimension count from 2 to 1 for field=\"dim\"", expected.getMessage());
 
     IOUtils.close(r, w2, dir, dir2);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
index f76b973..90df7c3 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
@@ -1095,7 +1095,7 @@ public class TestPointQueries extends LuceneTestCase {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
       s.count(BinaryPoint.newRangeQuery("value", point, point));
     });
-    assertEquals("field=\"value\" was indexed with numDims=1 but this query has numDims=2", expected.getMessage());
+    assertEquals("field=\"value\" was indexed with numIndexDimensions=1 but this query has numDims=2", expected.getMessage());
 
     IOUtils.close(r, w, dir);
   }
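
The reworded message reflects that a range query's dimension count is now validated against the field's index dimension count rather than its data dimension count. A hedged sketch, assuming a field "value" indexed with a single 8-byte index dimension and an already-open searcher:

    import java.io.IOException;

    import org.apache.lucene.document.BinaryPoint;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;

    public class QueryDimsSketch {
      static int countInRange(IndexSearcher searcher) throws IOException {
        byte[] lower = new byte[8];
        byte[] upper = new byte[8];
        // One-dimensional range query: matches numIndexDimensions=1, so it runs.
        Query ok = BinaryPoint.newRangeQuery("value", lower, upper);

        // A two-dimensional query against the same field, e.g.
        //   BinaryPoint.newRangeQuery("value",
        //       new byte[][] {lower, lower}, new byte[][] {upper, upper});
        // would be rejected at search time with:
        //   field="value" was indexed with numIndexDimensions=1 but this query has numDims=2

        return searcher.count(ok);
      }
    }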

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java b/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
index f87a73a..a51b161 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
@@ -327,7 +327,12 @@ public class TestDocIdSetBuilder extends LuceneTestCase {
     }
 
     @Override
-    public int getNumDimensions() throws IOException {
+    public int getNumDataDimensions() throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getNumIndexDimensions() throws IOException {
       throw new UnsupportedOperationException();
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/util/bkd/Test2BBKDPoints.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/Test2BBKDPoints.java b/lucene/core/src/test/org/apache/lucene/util/bkd/Test2BBKDPoints.java
index e30168c..0d57bf8 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/Test2BBKDPoints.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/Test2BBKDPoints.java
@@ -41,7 +41,7 @@ public class Test2BBKDPoints extends LuceneTestCase {
 
     final int numDocs = (Integer.MAX_VALUE / 26) + 100;
 
-    BKDWriter w = new BKDWriter(numDocs, dir, "_0", 1, Long.BYTES,
+    BKDWriter w = new BKDWriter(numDocs, dir, "_0", 1, 1, Long.BYTES,
                                 BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP, 26L * numDocs, false);
     int counter = 0;
     byte[] packedBytes = new byte[Long.BYTES];
@@ -78,7 +78,7 @@ public class Test2BBKDPoints extends LuceneTestCase {
 
     final int numDocs = (Integer.MAX_VALUE / 26) + 100;
 
-    BKDWriter w = new BKDWriter(numDocs, dir, "_0", 2, Long.BYTES,
+    BKDWriter w = new BKDWriter(numDocs, dir, "_0", 2, 2, Long.BYTES,
                                 BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP, 26L * numDocs, false);
     int counter = 0;
     byte[] packedBytes = new byte[2*Long.BYTES];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
index cbd4d37..d75d785 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
@@ -48,7 +48,7 @@ public class TestBKD extends LuceneTestCase {
 
   public void testBasicInts1D() throws Exception {
     try (Directory dir = getDirectory(100)) {
-      BKDWriter w = new BKDWriter(100, dir, "tmp", 1, 4, 2, 1.0f, 100, true);
+      BKDWriter w = new BKDWriter(100, dir, "tmp", 1, 1, 4, 2, 1.0f, 100, true);
       byte[] scratch = new byte[4];
       for(int docID=0;docID<100;docID++) {
         NumericUtils.intToSortableBytes(docID, scratch, 0);
@@ -121,12 +121,13 @@ public class TestBKD extends LuceneTestCase {
     int numDocs = atLeast(1000);
     try (Directory dir = getDirectory(numDocs)) {
       int numDims = TestUtil.nextInt(random(), 1, 5);
+      int numIndexDims = TestUtil.nextInt(random(), 1, numDims);
       int maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 100);
       float maxMB = (float) 3.0 + (3*random().nextFloat());
-      BKDWriter w = new BKDWriter(numDocs, dir, "tmp", numDims, 4, maxPointsInLeafNode, maxMB, numDocs, true);
+      BKDWriter w = new BKDWriter(numDocs, dir, "tmp", numDims, numIndexDims, 4, maxPointsInLeafNode, maxMB, numDocs, true);
 
       if (VERBOSE) {
-        System.out.println("TEST: numDims=" + numDims + " numDocs=" + numDocs);
+        System.out.println("TEST: numDims=" + numDims + " numIndexDims=" + numIndexDims + " numDocs=" + numDocs);
       }
       int[][] docs = new int[numDocs][];
       byte[] scratch = new byte[4*numDims];
@@ -167,7 +168,7 @@ public class TestBKD extends LuceneTestCase {
 
         byte[] minPackedValue = r.getMinPackedValue();
         byte[] maxPackedValue = r.getMaxPackedValue();
-        for(int dim=0;dim<numDims;dim++) {
+        for(int dim=0;dim<numIndexDims;dim++) {
           assertEquals(minValue[dim], NumericUtils.sortableBytesToInt(minPackedValue, dim * Integer.BYTES));
           assertEquals(maxValue[dim], NumericUtils.sortableBytesToInt(maxPackedValue, dim * Integer.BYTES));
         }
@@ -181,7 +182,7 @@ public class TestBKD extends LuceneTestCase {
           // Random N dims rect query:
           int[] queryMin = new int[numDims];
           int[] queryMax = new int[numDims];    
-          for(int dim=0;dim<numDims;dim++) {
+          for(int dim=0;dim<numIndexDims;dim++) {
             queryMin[dim] = random().nextInt();
             queryMax[dim] = random().nextInt();
             if (queryMin[dim] > queryMax[dim]) {
@@ -202,7 +203,7 @@ public class TestBKD extends LuceneTestCase {
             @Override
             public void visit(int docID, byte[] packedValue) {
               //System.out.println("visit check docID=" + docID);
-              for(int dim=0;dim<numDims;dim++) {
+              for(int dim=0;dim<numIndexDims;dim++) {
                 int x = NumericUtils.sortableBytesToInt(packedValue, dim * Integer.BYTES);
                 if (x < queryMin[dim] || x > queryMax[dim]) {
                   //System.out.println("  no");
@@ -217,7 +218,7 @@ public class TestBKD extends LuceneTestCase {
             @Override
             public Relation compare(byte[] minPacked, byte[] maxPacked) {
               boolean crosses = false;
-              for(int dim=0;dim<numDims;dim++) {
+              for(int dim=0;dim<numIndexDims;dim++) {
                 int min = NumericUtils.sortableBytesToInt(minPacked, dim * Integer.BYTES);
                 int max = NumericUtils.sortableBytesToInt(maxPacked, dim * Integer.BYTES);
                 assert max >= min;
@@ -240,7 +241,7 @@ public class TestBKD extends LuceneTestCase {
           for(int docID=0;docID<numDocs;docID++) {
             int[] docValues = docs[docID];
             boolean expected = true;
-            for(int dim=0;dim<numDims;dim++) {
+            for(int dim=0;dim<numIndexDims;dim++) {
               int x = docValues[dim];
               if (x < queryMin[dim] || x > queryMax[dim]) {
                 expected = false;
@@ -264,7 +265,7 @@ public class TestBKD extends LuceneTestCase {
       int numDims = TestUtil.nextInt(random(), 1, 5);
       int maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 100);
       float maxMB = (float) 3.0 + (3*random().nextFloat());
-      BKDWriter w = new BKDWriter(numDocs, dir, "tmp", numDims, numBytesPerDim, maxPointsInLeafNode, maxMB, numDocs, true);
+      BKDWriter w = new BKDWriter(numDocs, dir, "tmp", numDims, numDims, numBytesPerDim, maxPointsInLeafNode, maxMB, numDocs, true);
       BigInteger[][] docs = new BigInteger[numDocs][];
 
       byte[] scratch = new byte[numBytesPerDim*numDims];
@@ -380,13 +381,14 @@ public class TestBKD extends LuceneTestCase {
   public void testWithExceptions() throws Exception {
     int numDocs = atLeast(10000);
     int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
-    int numDims = TestUtil.nextInt(random(), 1, 5);
+    int numDataDims = TestUtil.nextInt(random(), 1, 5);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
 
     byte[][][] docValues = new byte[numDocs][][];
 
     for(int docID=0;docID<numDocs;docID++) {
-      byte[][] values = new byte[numDims][];
-      for(int dim=0;dim<numDims;dim++) {
+      byte[][] values = new byte[numDataDims][];
+      for(int dim=0;dim<numDataDims;dim++) {
         values[dim] = new byte[numBytesPerDim];
         random().nextBytes(values[dim]);
       }
@@ -401,7 +403,7 @@ public class TestBKD extends LuceneTestCase {
       try {
         dir.setRandomIOExceptionRate(0.05);
         dir.setRandomIOExceptionRateOnOpen(0.05);
-        verify(dir, docValues, null, numDims, numBytesPerDim, 50, maxMBHeap);
+        verify(dir, docValues, null, numDataDims, numIndexDims, numBytesPerDim, 50, maxMBHeap);
       } catch (IllegalArgumentException iae) {
         // This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry w/ more heap
         assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
@@ -439,7 +441,7 @@ public class TestBKD extends LuceneTestCase {
   public void testTooLittleHeap() throws Exception { 
     try (Directory dir = getDirectory(0)) {
       IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
-        new BKDWriter(1, dir, "bkd", 1, 16, 1000000, 0.001, 0, true);
+        new BKDWriter(1, dir, "bkd", 1, 1, 16, 1000000, 0.001, 0, true);
       });
       assertTrue(expected.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
     }
@@ -449,33 +451,35 @@ public class TestBKD extends LuceneTestCase {
     int numDocs = TestUtil.nextInt(random(), count, count*2);
     int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
 
-    int numDims = TestUtil.nextInt(random(), 1, 5);
+    int numDataDims = TestUtil.nextInt(random(), 1, 5);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
 
     byte[][][] docValues = new byte[numDocs][][];
 
     for(int docID=0;docID<numDocs;docID++) {
-      byte[][] values = new byte[numDims][];
-      for(int dim=0;dim<numDims;dim++) {
+      byte[][] values = new byte[numDataDims][];
+      for(int dim=0;dim<numDataDims;dim++) {
         values[dim] = new byte[numBytesPerDim];
         random().nextBytes(values[dim]);
       }
       docValues[docID] = values;
     }
 
-    verify(docValues, null, numDims, numBytesPerDim);
+    verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim);
   }
 
   public void testAllEqual() throws Exception {
     int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
-    int numDims = TestUtil.nextInt(random(), 1, 5);
+    int numDataDims = TestUtil.nextInt(random(), 1, 5);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
 
     int numDocs = atLeast(1000);
     byte[][][] docValues = new byte[numDocs][][];
 
     for(int docID=0;docID<numDocs;docID++) {
       if (docID == 0) {
-        byte[][] values = new byte[numDims][];
-        for(int dim=0;dim<numDims;dim++) {
+        byte[][] values = new byte[numDataDims][];
+        for(int dim=0;dim<numDataDims;dim++) {
           values[dim] = new byte[numBytesPerDim];
           random().nextBytes(values[dim]);
         }
@@ -485,20 +489,21 @@ public class TestBKD extends LuceneTestCase {
       }
     }
 
-    verify(docValues, null, numDims, numBytesPerDim);
+    verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim);
   }
 
   public void testOneDimEqual() throws Exception {
     int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
-    int numDims = TestUtil.nextInt(random(), 1, 5);
+    int numDataDims = TestUtil.nextInt(random(), 1, 5);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
 
     int numDocs = atLeast(1000);
-    int theEqualDim = random().nextInt(numDims);
+    int theEqualDim = random().nextInt(numDataDims);
     byte[][][] docValues = new byte[numDocs][][];
 
     for(int docID=0;docID<numDocs;docID++) {
-      byte[][] values = new byte[numDims][];
-      for(int dim=0;dim<numDims;dim++) {
+      byte[][] values = new byte[numDataDims][];
+      for(int dim=0;dim<numDataDims;dim++) {
         values[dim] = new byte[numBytesPerDim];
         random().nextBytes(values[dim]);
       }
@@ -509,17 +514,18 @@ public class TestBKD extends LuceneTestCase {
     }
 
     // Use a small number of points in leaf blocks to trigger a lot of splitting
-    verify(docValues, null, numDims, numBytesPerDim, TestUtil.nextInt(random(), 20, 50));
+    verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim, TestUtil.nextInt(random(), 20, 50));
   }
 
   // This triggers the logic that makes sure all dimensions get indexed
   // by looking at how many times each dim has been split
   public void testOneDimLowCard() throws Exception {
     int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
-    int numDims = TestUtil.nextInt(random(), 2, 5);
+    int numDataDims = TestUtil.nextInt(random(), 2, 5);
+    int numIndexDims = TestUtil.nextInt(random(), 2, numDataDims);
 
     int numDocs = atLeast(10000);
-    int theLowCardDim = random().nextInt(numDims);
+    int theLowCardDim = random().nextInt(numDataDims);
 
     byte[] value1 = new byte[numBytesPerDim];
     random().nextBytes(value1);
@@ -533,8 +539,8 @@ public class TestBKD extends LuceneTestCase {
     byte[][][] docValues = new byte[numDocs][][];
 
     for(int docID=0;docID<numDocs;docID++) {
-      byte[][] values = new byte[numDims][];
-      for(int dim=0;dim<numDims;dim++) {
+      byte[][] values = new byte[numDataDims][];
+      for(int dim=0;dim<numDataDims;dim++) {
         if (dim == theLowCardDim) {
           values[dim] = random().nextBoolean() ? value1 : value2;
         } else {
@@ -546,16 +552,17 @@ public class TestBKD extends LuceneTestCase {
     }
 
     // Use a small number of points in leaf blocks to trigger a lot of splitting
-    verify(docValues, null, numDims, numBytesPerDim, TestUtil.nextInt(random(), 20, 50));
+    verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim, TestUtil.nextInt(random(), 20, 50));
   }
 
   // this should trigger run-length compression with lengths that are greater than 255
   public void testOneDimTwoValues() throws Exception {
     int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
-    int numDims = TestUtil.nextInt(random(), 1, 5);
+    int numDataDims = TestUtil.nextInt(random(), 1, 5);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
 
     int numDocs = atLeast(1000);
-    int theDim = random().nextInt(numDims);
+    int theDim = random().nextInt(numDataDims);
     byte[] value1 = new byte[numBytesPerDim];
     random().nextBytes(value1);
     byte[] value2 = new byte[numBytesPerDim];
@@ -563,8 +570,8 @@ public class TestBKD extends LuceneTestCase {
     byte[][][] docValues = new byte[numDocs][][];
 
     for(int docID=0;docID<numDocs;docID++) {
-      byte[][] values = new byte[numDims][];
-      for(int dim=0;dim<numDims;dim++) {
+      byte[][] values = new byte[numDataDims][];
+      for(int dim=0;dim<numDataDims;dim++) {
         if (dim == theDim) {
           values[dim] = random().nextBoolean() ? value1 : value2;
         } else {
@@ -575,12 +582,13 @@ public class TestBKD extends LuceneTestCase {
       docValues[docID] = values;
     }
 
-    verify(docValues, null, numDims, numBytesPerDim);
+    verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim);
   }
 
   public void testMultiValued() throws Exception {
     int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
-    int numDims = TestUtil.nextInt(random(), 1, 5);
+    int numDataDims = TestUtil.nextInt(random(), 1, 5);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
 
     int numDocs = atLeast(1000);
     List<byte[][]> docValues = new ArrayList<>();
@@ -590,8 +598,8 @@ public class TestBKD extends LuceneTestCase {
       int numValuesInDoc = TestUtil.nextInt(random(), 1, 5);
       for(int ord=0;ord<numValuesInDoc;ord++) {
         docIDs.add(docID);
-        byte[][] values = new byte[numDims][];
-        for(int dim=0;dim<numDims;dim++) {
+        byte[][] values = new byte[numDataDims][];
+        for(int dim=0;dim<numDataDims;dim++) {
           values[dim] = new byte[numBytesPerDim];
           random().nextBytes(values[dim]);
         }
@@ -605,33 +613,33 @@ public class TestBKD extends LuceneTestCase {
       docIDsArray[i] = docIDs.get(i);
     }
 
-    verify(docValuesArray, docIDsArray, numDims, numBytesPerDim);
+    verify(docValuesArray, docIDsArray, numDataDims, numIndexDims, numBytesPerDim);
   }
 
   /** docIDs can be null, for the single valued case, else it maps value to docID */
-  private void verify(byte[][][] docValues, int[] docIDs, int numDims, int numBytesPerDim) throws Exception {
-    verify(docValues, docIDs, numDims, numBytesPerDim, TestUtil.nextInt(random(), 50, 1000));
+  private void verify(byte[][][] docValues, int[] docIDs, int numDataDims, int numIndexDims, int numBytesPerDim) throws Exception {
+    verify(docValues, docIDs, numDataDims, numIndexDims, numBytesPerDim, TestUtil.nextInt(random(), 50, 1000));
   }
 
-  private void verify(byte[][][] docValues, int[] docIDs, int numDims, int numBytesPerDim,
+  private void verify(byte[][][] docValues, int[] docIDs, int numDataDims, int numIndexDims, int numBytesPerDim,
       int maxPointsInLeafNode) throws Exception {
     try (Directory dir = getDirectory(docValues.length)) {
       double maxMB = (float) 3.0 + (3*random().nextDouble());
-      verify(dir, docValues, docIDs, numDims, numBytesPerDim, maxPointsInLeafNode, maxMB);
+      verify(dir, docValues, docIDs, numDataDims, numIndexDims, numBytesPerDim, maxPointsInLeafNode, maxMB);
     }
   }
 
-  private void verify(Directory dir, byte[][][] docValues, int[] docIDs, int numDims, int numBytesPerDim, int maxPointsInLeafNode, double maxMB) throws Exception {
+  private void verify(Directory dir, byte[][][] docValues, int[] docIDs, int numDataDims, int numIndexDims, int numBytesPerDim, int maxPointsInLeafNode, double maxMB) throws Exception {
     int numValues = docValues.length;
     if (VERBOSE) {
-      System.out.println("TEST: numValues=" + numValues + " numDims=" + numDims + " numBytesPerDim=" + numBytesPerDim + " maxPointsInLeafNode=" + maxPointsInLeafNode + " maxMB=" + maxMB);
+      System.out.println("TEST: numValues=" + numValues + " numDataDims=" + numDataDims + " numIndexDims=" + numIndexDims + " numBytesPerDim=" + numBytesPerDim + " maxPointsInLeafNode=" + maxPointsInLeafNode + " maxMB=" + maxMB);
     }
 
     List<Long> toMerge = null;
     List<MergeState.DocMap> docMaps = null;
     int seg = 0;
 
-    BKDWriter w = new BKDWriter(numValues, dir, "_" + seg, numDims, numBytesPerDim, maxPointsInLeafNode, maxMB, docValues.length, false);
+    BKDWriter w = new BKDWriter(numValues, dir, "_" + seg, numDataDims, numIndexDims, numBytesPerDim, maxPointsInLeafNode, maxMB, docValues.length, false);
     IndexOutput out = dir.createOutput("bkd", IOContext.DEFAULT);
     IndexInput in = null;
 
@@ -639,9 +647,9 @@ public class TestBKD extends LuceneTestCase {
 
     try {
 
-      byte[] scratch = new byte[numBytesPerDim*numDims];
+      byte[] scratch = new byte[numBytesPerDim*numDataDims];
       int lastDocIDBase = 0;
-      boolean useMerge = numDims == 1 && numValues >= 10 && random().nextBoolean();
+      boolean useMerge = numDataDims == 1 && numValues >= 10 && random().nextBoolean();
       int valuesInThisSeg;
       if (useMerge) {
         // Sometimes we will call merge with a single segment:
@@ -662,7 +670,7 @@ public class TestBKD extends LuceneTestCase {
         if (VERBOSE) {
           System.out.println("  ord=" + ord + " docID=" + docID + " lastDocIDBase=" + lastDocIDBase);
         }
-        for(int dim=0;dim<numDims;dim++) {
+        for(int dim=0;dim<numDataDims;dim++) {
           if (VERBOSE) {
             System.out.println("    " + dim + " -> " + new BytesRef(docValues[ord][dim]));
           }
@@ -691,7 +699,7 @@ public class TestBKD extends LuceneTestCase {
           seg++;
           maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 1000);
           maxMB = (float) 3.0 + (3*random().nextDouble());
-          w = new BKDWriter(numValues, dir, "_" + seg, numDims, numBytesPerDim, maxPointsInLeafNode, maxMB, docValues.length, false);
+          w = new BKDWriter(numValues, dir, "_" + seg, numDataDims, numIndexDims, numBytesPerDim, maxPointsInLeafNode, maxMB, docValues.length, false);
           lastDocIDBase = docID;
         }
       }
@@ -712,7 +720,7 @@ public class TestBKD extends LuceneTestCase {
         out.close();
         in = dir.openInput("bkd", IOContext.DEFAULT);
         seg++;
-        w = new BKDWriter(numValues, dir, "_" + seg, numDims, numBytesPerDim, maxPointsInLeafNode, maxMB, docValues.length, false);
+        w = new BKDWriter(numValues, dir, "_" + seg, numDataDims, numIndexDims, numBytesPerDim, maxPointsInLeafNode, maxMB, docValues.length, false);
         List<BKDReader> readers = new ArrayList<>();
         for(long fp : toMerge) {
           in.seek(fp);
@@ -739,9 +747,9 @@ public class TestBKD extends LuceneTestCase {
         }
 
         // Random N dims rect query:
-        byte[][] queryMin = new byte[numDims][];
-        byte[][] queryMax = new byte[numDims][];    
-        for(int dim=0;dim<numDims;dim++) {    
+        byte[][] queryMin = new byte[numDataDims][];
+        byte[][] queryMax = new byte[numDataDims][];
+        for(int dim=0;dim<numDataDims;dim++) {
           queryMin[dim] = new byte[numBytesPerDim];
           random().nextBytes(queryMin[dim]);
           queryMax[dim] = new byte[numBytesPerDim];
@@ -764,7 +772,7 @@ public class TestBKD extends LuceneTestCase {
             @Override
             public void visit(int docID, byte[] packedValue) {
               //System.out.println("visit check docID=" + docID);
-              for(int dim=0;dim<numDims;dim++) {
+              for(int dim=0;dim<numIndexDims;dim++) {
                 if (FutureArrays.compareUnsigned(packedValue, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 ||
                     FutureArrays.compareUnsigned(packedValue, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMax[dim], 0, numBytesPerDim) > 0) {
                   //System.out.println("  no");
@@ -779,7 +787,7 @@ public class TestBKD extends LuceneTestCase {
             @Override
             public Relation compare(byte[] minPacked, byte[] maxPacked) {
               boolean crosses = false;
-              for(int dim=0;dim<numDims;dim++) {
+              for(int dim=0;dim<numIndexDims;dim++) {
                 if (FutureArrays.compareUnsigned(maxPacked, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 ||
                     FutureArrays.compareUnsigned(minPacked, dim * numBytesPerDim, dim * numBytesPerDim + numBytesPerDim, queryMax[dim], 0, numBytesPerDim) > 0) {
                   return Relation.CELL_OUTSIDE_QUERY;
@@ -800,7 +808,7 @@ public class TestBKD extends LuceneTestCase {
         BitSet expected = new BitSet();
         for(int ord=0;ord<numValues;ord++) {
           boolean matches = true;
-          for(int dim=0;dim<numDims;dim++) {
+          for(int dim=0;dim<numIndexDims;dim++) {
             byte[] x = docValues[ord][dim];
             if (FutureArrays.compareUnsigned(x, 0, numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0 ||
                 FutureArrays.compareUnsigned(x, 0, numBytesPerDim, queryMax[dim], 0, numBytesPerDim) > 0) {
@@ -897,7 +905,7 @@ public class TestBKD extends LuceneTestCase {
       };
 
       CorruptIndexException e = expectThrows(CorruptIndexException.class, () -> {
-          verify(dir, docValues, null, numDims, numBytesPerDim, 50, 0.1);
+          verify(dir, docValues, null, numDims, numDims, numBytesPerDim, 50, 0.1);
         });
       assertTrue(e.getMessage().contains("checksum failed (hardware problem?)"));
     }
@@ -945,7 +953,7 @@ public class TestBKD extends LuceneTestCase {
       };
 
       Throwable t = expectThrows(CorruptIndexException.class, () -> {
-          verify(dir, docValues, null, numDims, numBytesPerDim, 50, 0.1);
+          verify(dir, docValues, null, numDims, numDims, numBytesPerDim, 50, 0.1);
         });
       assertCorruptionDetected(t);
     }
@@ -971,7 +979,7 @@ public class TestBKD extends LuceneTestCase {
   public void testTieBreakOrder() throws Exception {
     try (Directory dir = newDirectory()) {
       int numDocs = 10000;
-      BKDWriter w = new BKDWriter(numDocs+1, dir, "tmp", 1, Integer.BYTES, 2, 0.01f, numDocs, true);
+      BKDWriter w = new BKDWriter(numDocs+1, dir, "tmp", 1, 1, Integer.BYTES, 2, 0.01f, numDocs, true);
       for(int i=0;i<numDocs;i++) {
         w.add(new byte[Integer.BYTES], i);
       }
@@ -1012,7 +1020,7 @@ public class TestBKD extends LuceneTestCase {
       boolean singleValuePerDoc = false;
       boolean longOrds = true;
       int offlineSorterMaxTempFiles = TestUtil.nextInt(random(), 2, 20);
-      BKDWriter w = new BKDWriter(numDocs+1, dir, "tmp", 2, Integer.BYTES, 2, 0.01f, numDocs,
+      BKDWriter w = new BKDWriter(numDocs+1, dir, "tmp", 2, 2, Integer.BYTES, 2, 0.01f, numDocs,
                                   singleValuePerDoc, longOrds, 1, offlineSorterMaxTempFiles);
       byte[] buffer = new byte[2*Integer.BYTES];
       for(int i=0;i<numDocs;i++) {
@@ -1058,12 +1066,13 @@ public class TestBKD extends LuceneTestCase {
   // values as a LongPoint:
   public void testWastedLeadingBytes() throws Exception {
     int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
+    int numIndexDims = TestUtil.nextInt(random(), 1, numDims);
     int bytesPerDim = PointValues.MAX_NUM_BYTES;
     int bytesUsed = TestUtil.nextInt(random(), 1, 3);
 
     Directory dir = newFSDirectory(createTempDir());
     int numDocs = 100000;
-    BKDWriter w = new BKDWriter(numDocs+1, dir, "tmp", numDims, bytesPerDim, 32, 1f, numDocs, true);
+    BKDWriter w = new BKDWriter(numDocs+1, dir, "tmp", numDims, numIndexDims, bytesPerDim, 32, 1f, numDocs, true);
     byte[] tmp = new byte[bytesUsed];
     byte[] buffer = new byte[numDims * bytesPerDim];
     for(int i=0;i<numDocs;i++) {
@@ -1091,11 +1100,14 @@ public class TestBKD extends LuceneTestCase {
 
         @Override
         public void visit(int docID, byte[] packedValue) {
+          assert packedValue.length == numDims * bytesPerDim;
           visit(docID);
         }
 
         @Override
         public Relation compare(byte[] minPacked, byte[] maxPacked) {
+          assert minPacked.length == numIndexDims * bytesPerDim;
+          assert maxPacked.length == numIndexDims * bytesPerDim;
           if (random().nextInt(7) == 1) {
             return Relation.CELL_CROSSES_QUERY;
           } else {
@@ -1117,7 +1129,7 @@ public class TestBKD extends LuceneTestCase {
     final byte[] uniquePointValue = new byte[numBytesPerDim];
     random().nextBytes(uniquePointValue);
 
-    BKDWriter w = new BKDWriter(numValues, dir, "_temp", 1, numBytesPerDim, maxPointsInLeafNode,
+    BKDWriter w = new BKDWriter(numValues, dir, "_temp", 1, 1, numBytesPerDim, maxPointsInLeafNode,
         BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP, numValues, true);
     for (int i = 0; i < numValues; ++i) {
       if (i == numValues / 2) {

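Side note on the change above: the reworked test helpers now thread two separate dimension counts into BKDWriter — numDataDims, the number of dimensions stored with every point, and numIndexDims, the leading dimensions the tree actually splits on. A minimal standalone sketch of that constructor shape, assuming the ten-argument signature visible in the hunks above (the path, counts, leaf size and heap budget below are illustrative placeholders, not values from the patch):

import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.bkd.BKDWriter;

public class BKDWriterSketch {
  public static void main(String[] args) throws Exception {
    int numDocs = 1000;
    int numDataDims = 4;                 // dimensions stored with each point
    int numIndexDims = 2;                // leading dimensions used to split the tree
    int numBytesPerDim = Integer.BYTES;
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/bkd-sketch"))) {
      BKDWriter w = new BKDWriter(numDocs, dir, "_0", numDataDims, numIndexDims,
          numBytesPerDim, 512, 16.0, numDocs, true);
      byte[] packedValue = new byte[numDataDims * numBytesPerDim];
      for (int docID = 0; docID < numDocs; docID++) {
        // fill packedValue with this doc's point (all data dimensions), then add it
        w.add(packedValue, docID);
      }
      try (IndexOutput out = dir.createOutput("bkd-sketch.kdd", IOContext.DEFAULT)) {
        long indexFP = w.finish(out);    // file pointer where the tree index starts
        System.out.println("wrote BKD tree, index starts at fp=" + indexFP);
      }
    }
  }
}

The query-side hunks further down make the same distinction: visitors only compare the first numIndexDims dimensions, even though numDataDims dimensions are stored per point.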
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java b/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
index f38ab5a..da75b12 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
@@ -232,7 +232,12 @@ public class TestMutablePointsReaderUtils extends LuceneTestCase {
     }
 
     @Override
-    public int getNumDimensions() throws IOException {
+    public int getNumDataDimensions() throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getNumIndexDimensions() throws IOException {
       throw new UnsupportedOperationException();
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
index 1eef95f..42f7aec 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
@@ -81,7 +81,7 @@ public class TermVectorLeafReader extends LeafReader {
     }
     FieldInfo fieldInfo = new FieldInfo(field, 0,
                                         true, true, terms.hasPayloads(),
-                                        indexOptions, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, false);
+                                        indexOptions, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false);
     fieldInfos = new FieldInfos(new FieldInfo[]{fieldInfo});
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/join/src/java/org/apache/lucene/search/join/PointInSetIncludingScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/PointInSetIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/PointInSetIncludingScoreQuery.java
index 02b7e86..80810e2 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/PointInSetIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/PointInSetIncludingScoreQuery.java
@@ -146,8 +146,8 @@ abstract class PointInSetIncludingScoreQuery extends Query {
         if (fieldInfo == null) {
           return null;
         }
-        if (fieldInfo.getPointDimensionCount() != 1) {
-          throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numDims=" + fieldInfo.getPointDimensionCount() + " but this query has numDims=1");
+        if (fieldInfo.getPointDataDimensionCount() != 1) {
+          throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numDims=" + fieldInfo.getPointDataDimensionCount() + " but this query has numDims=1");
         }
         if (fieldInfo.getPointNumBytes() != bytesPerDim) {
           throw new IllegalArgumentException("field=\"" + field + "\" was indexed with bytesPerDim=" + fieldInfo.getPointNumBytes() + " but this query has bytesPerDim=" + bytesPerDim);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
----------------------------------------------------------------------
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index 42af398..0078c3f 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -410,7 +410,7 @@ public class MemoryIndex {
       storeDocValues(info, docValuesType, docValuesValue);
     }
 
-    if (field.fieldType().pointDimensionCount() > 0) {
+    if (field.fieldType().pointDataDimensionCount() > 0) {
       storePointValues(info, field.binaryValue());
     }
 
@@ -486,9 +486,9 @@ public class MemoryIndex {
     if (info == null) {
       fields.put(fieldName, info = new Info(createFieldInfo(fieldName, fields.size(), fieldType), byteBlockPool));
     }
-    if (fieldType.pointDimensionCount() != info.fieldInfo.getPointDimensionCount()) {
-      if (fieldType.pointDimensionCount() > 0)
-        info.fieldInfo.setPointDimensions(fieldType.pointDimensionCount(), fieldType.pointNumBytes());
+    if (fieldType.pointDataDimensionCount() != info.fieldInfo.getPointDataDimensionCount()) {
+      if (fieldType.pointDataDimensionCount() > 0)
+        info.fieldInfo.setPointDimensions(fieldType.pointDataDimensionCount(), fieldType.pointIndexDimensionCount(), fieldType.pointNumBytes());
     }
     if (fieldType.docValuesType() != info.fieldInfo.getDocValuesType()) {
       if (fieldType.docValuesType() != DocValuesType.NONE)
@@ -501,7 +501,7 @@ public class MemoryIndex {
     IndexOptions indexOptions = storeOffsets ? IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
     return new FieldInfo(fieldName, ord, fieldType.storeTermVectors(), fieldType.omitNorms(), storePayloads,
         indexOptions, fieldType.docValuesType(), -1, Collections.emptyMap(),
-        fieldType.pointDimensionCount(), fieldType.pointNumBytes(), false);
+        fieldType.pointDataDimensionCount(), fieldType.pointIndexDimensionCount(), fieldType.pointNumBytes(), false);
   }
 
   private void storePointValues(Info info, BytesRef pointValue) {
@@ -520,7 +520,8 @@ public class MemoryIndex {
       info.fieldInfo = new FieldInfo(
           info.fieldInfo.name, info.fieldInfo.number, info.fieldInfo.hasVectors(), info.fieldInfo.hasPayloads(),
           info.fieldInfo.hasPayloads(), info.fieldInfo.getIndexOptions(), docValuesType, -1, info.fieldInfo.attributes(),
-          info.fieldInfo.getPointDimensionCount(), info.fieldInfo.getPointNumBytes(), info.fieldInfo.isSoftDeletesField()
+          info.fieldInfo.getPointDataDimensionCount(), info.fieldInfo.getPointIndexDimensionCount(), info.fieldInfo.getPointNumBytes(),
+          info.fieldInfo.isSoftDeletesField()
       );
     } else if (existingDocValuesType != docValuesType) {
       throw new IllegalArgumentException("Can't add [" + docValuesType + "] doc values field [" + fieldName + "], because [" + existingDocValuesType + "] doc values field already exists");
@@ -870,7 +871,7 @@ public class MemoryIndex {
         if (pointValues != null) {
           assert pointValues[0].bytes.length == pointValues[0].length : "BytesRef should wrap a precise byte[], BytesRef.deepCopyOf() should take care of this";
 
-          final int numDimensions = fieldInfo.getPointDimensionCount();
+          final int numDimensions = fieldInfo.getPointDataDimensionCount();
           final int numBytesPerDimension = fieldInfo.getPointNumBytes();
           if (numDimensions == 1) {
             // PointInSetQuery.MergePointVisitor expects values to be visited in increasing order,
@@ -1576,8 +1577,13 @@ public class MemoryIndex {
       }
 
       @Override
-      public int getNumDimensions() throws IOException {
-        return info.fieldInfo.getPointDimensionCount();
+      public int getNumDataDimensions() throws IOException {
+        return info.fieldInfo.getPointDataDimensionCount();
+      }
+
+      @Override
+      public int getNumIndexDimensions() throws IOException {
+        return info.fieldInfo.getPointDataDimensionCount();
       }
 
       @Override

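The MemoryIndex hunks above only switch the accessors used to detect and describe point fields; indexing a point field itself is unchanged. A small hedged usage sketch (field name and value are arbitrary, and the class name is made up for illustration):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.index.memory.MemoryIndex;

public class MemoryIndexPointSketch {
  public static void main(String[] args) {
    MemoryIndex mi = new MemoryIndex();
    // IntPoint's FieldType reports pointDataDimensionCount() > 0, so the
    // branch patched above stores its packed value for later point queries.
    mi.addField(new IntPoint("price", 42), new StandardAnalyzer());
    float score = mi.search(IntPoint.newExactQuery("price", 42));
    System.out.println(score > 0 ? "matched" : "no match");
  }
}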
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/sandbox/src/java/org/apache/lucene/document/BigIntegerPoint.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/document/BigIntegerPoint.java b/lucene/sandbox/src/java/org/apache/lucene/document/BigIntegerPoint.java
index 4dc50d1..82eeb3f 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/document/BigIntegerPoint.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/document/BigIntegerPoint.java
@@ -62,8 +62,8 @@ public class BigIntegerPoint extends Field {
 
   /** Change the values of this field */
   public void setBigIntegerValues(BigInteger... point) {
-    if (type.pointDimensionCount() != point.length) {
-      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
+    if (type.pointDataDimensionCount() != point.length) {
+      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
     }
     fieldsData = pack(point);
   }
@@ -75,8 +75,8 @@ public class BigIntegerPoint extends Field {
 
   @Override
   public Number numericValue() {
-    if (type.pointDimensionCount() != 1) {
-      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
+    if (type.pointDataDimensionCount() != 1) {
+      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
     assert bytes.length == BYTES;
@@ -119,7 +119,7 @@ public class BigIntegerPoint extends Field {
     result.append(':');
 
     BytesRef bytes = (BytesRef) fieldsData;
-    for (int dim = 0; dim < type.pointDimensionCount(); dim++) {
+    for (int dim = 0; dim < type.pointDataDimensionCount(); dim++) {
       if (dim > 0) {
         result.append(',');
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/sandbox/src/java/org/apache/lucene/document/HalfFloatPoint.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/document/HalfFloatPoint.java b/lucene/sandbox/src/java/org/apache/lucene/document/HalfFloatPoint.java
index 1f599a6..15d8240 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/document/HalfFloatPoint.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/document/HalfFloatPoint.java
@@ -213,8 +213,8 @@ public final class HalfFloatPoint extends Field {
 
   /** Change the values of this field */
   public void setFloatValues(float... point) {
-    if (type.pointDimensionCount() != point.length) {
-      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
+    if (type.pointDataDimensionCount() != point.length) {
+      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
     }
     fieldsData = pack(point);
   }
@@ -226,8 +226,8 @@ public final class HalfFloatPoint extends Field {
 
   @Override
   public Number numericValue() {
-    if (type.pointDimensionCount() != 1) {
-      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
+    if (type.pointDataDimensionCount() != 1) {
+      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
     assert bytes.length == BYTES;
@@ -270,7 +270,7 @@ public final class HalfFloatPoint extends Field {
     result.append(':');
 
     BytesRef bytes = (BytesRef) fieldsData;
-    for (int dim = 0; dim < type.pointDimensionCount(); dim++) {
+    for (int dim = 0; dim < type.pointDataDimensionCount(); dim++) {
       if (dim > 0) {
         result.append(',');
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
index 5029d0e..2cdef86 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
@@ -148,7 +148,7 @@ public class BBoxStrategy extends SpatialStrategy {
     if ((this.hasDocVals = fieldType.docValuesType() != DocValuesType.NONE)) {
       numQuads++;
     }
-    if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
+    if ((this.hasPointVals = fieldType.pointDataDimensionCount() > 0)) {
       numQuads++;
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
index c7904df..f06770e 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
@@ -139,7 +139,7 @@ public class PointVectorStrategy extends SpatialStrategy {
     if ((this.hasDocVals = fieldType.docValuesType() != DocValuesType.NONE)) {
       numPairs++;
     }
-    if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
+    if ((this.hasPointVals = fieldType.pointDataDimensionCount() > 0)) {
       numPairs++;
     }
     this.fieldsLen = numPairs * 2;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java
index ff2e1b6..4943b99 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPointsFormat.java
@@ -131,8 +131,8 @@ public final class AssertingPointsFormat extends PointsFormat {
     
     @Override
     public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException {
-      if (fieldInfo.getPointDimensionCount() == 0) {
-        throw new IllegalArgumentException("writing field=\"" + fieldInfo.name + "\" but pointDimensionalCount is 0");
+      if (fieldInfo.getPointDataDimensionCount() == 0) {
+        throw new IllegalArgumentException("writing field=\"" + fieldInfo.name + "\" but pointDataDimensionalCount is 0");
       }
       in.writeField(fieldInfo, values);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
index 486d81c..681edbd 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
@@ -155,11 +155,19 @@ class CrankyPointsFormat extends PointsFormat {
         }
 
         @Override
-        public int getNumDimensions() throws IOException {
+        public int getNumDataDimensions() throws IOException {
           if (random.nextInt(100) == 0) {
             throw new IOException("Fake IOException");
           }
-          return delegate.getNumDimensions();
+          return delegate.getNumDataDimensions();
+        }
+
+        @Override
+        public int getNumIndexDimensions() throws IOException {
+          if (random.nextInt(100) == 0) {
+            throw new IOException("Fake IOException");
+          }
+          return delegate.getNumIndexDimensions();
         }
 
         @Override

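Taken together, the hunks in this commit split the single PointValues#getNumDimensions() accessor into getNumDataDimensions() and getNumIndexDimensions(). As a rough sketch of what a delegating reader looks like once both methods exist — assuming the remaining abstract methods of PointValues (intersect, estimatePointCount, min/max packed value, bytes per dimension, size, doc count) are unchanged by this patch; the class name is made up for illustration:

import java.io.IOException;
import org.apache.lucene.index.PointValues;

public class ForwardingPointValues extends PointValues {
  private final PointValues in;

  public ForwardingPointValues(PointValues in) {
    this.in = in;
  }

  @Override
  public void intersect(PointValues.IntersectVisitor visitor) throws IOException {
    in.intersect(visitor);
  }

  @Override
  public long estimatePointCount(PointValues.IntersectVisitor visitor) {
    return in.estimatePointCount(visitor);
  }

  @Override
  public byte[] getMinPackedValue() throws IOException {
    return in.getMinPackedValue();
  }

  @Override
  public byte[] getMaxPackedValue() throws IOException {
    return in.getMaxPackedValue();
  }

  @Override
  public int getNumDataDimensions() throws IOException {
    return in.getNumDataDimensions();   // all dimensions stored per point
  }

  @Override
  public int getNumIndexDimensions() throws IOException {
    return in.getNumIndexDimensions();  // leading dimensions used by the BKD index
  }

  @Override
  public int getBytesPerDimension() throws IOException {
    return in.getBytesPerDimension();
  }

  @Override
  public long size() {
    return in.size();
  }

  @Override
  public int getDocCount() {
    return in.getDocCount();
  }
}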

[34/50] [abbrv] lucene-solr:jira/http2: LUCENE-8522: throw InvalidShapeException when constructing a polygon and all points are coplanar.

Posted by da...@apache.org.
LUCENE-8522: throw InvalidShapeException when constructing a polygon
and all points are coplanar.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a5665d8a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a5665d8a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a5665d8a

Branch: refs/heads/jira/http2
Commit: a5665d8ae7fbc79b47c38c93588a347215c98405
Parents: 6c5df58
Author: iverase <iv...@apache.org>
Authored: Mon Oct 15 08:51:34 2018 +0200
Committer: iverase <iv...@apache.org>
Committed: Mon Oct 15 08:51:34 2018 +0200

----------------------------------------------------------------------
 .../spatial/spatial4j/Geo3dShapeFactory.java    |  3 +
 .../lucene/spatial/spatial4j/Geo3dRptTest.java  | 41 ----------
 .../lucene/spatial/spatial4j/Geo3dTest.java     | 85 ++++++++++++++++++++
 3 files changed, 88 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a5665d8a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java
index ccbd6df..071c775 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/spatial4j/Geo3dShapeFactory.java
@@ -331,6 +331,9 @@ public class Geo3dShapeFactory implements S2ShapeFactory {
     public Shape build() {
       GeoPolygonFactory.PolygonDescription description = new GeoPolygonFactory.PolygonDescription(points, polyHoles);
       GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(planetModel, description);
+      if (polygon == null) {
+        throw new InvalidShapeException("Invalid polygon, all points are coplanar");
+      }
       return new Geo3dShape<>(polygon, context);
     }
 

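With the null check above in place, a WKT polygon whose points are all coplanar now surfaces as an InvalidShapeException instead of a silently null polygon. A hedged sketch of what a caller sees, reusing the degenerate polygon from the new Geo3dTest added later in this commit (the wrapper class here is illustrative only):

import org.apache.lucene.spatial.spatial4j.Geo3dSpatialContextFactory;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.exception.InvalidShapeException;

public class CoplanarPolygonSketch {
  public static void main(String[] args) throws Exception {
    SpatialContext ctx = new Geo3dSpatialContextFactory().newSpatialContext();
    // Every point of this "polygon" lies on one plane, so it cannot be built.
    String degenerate = "POLYGON ((-180 90, -180 -90, 180 -90, 180 90,-180 -90))";
    try {
      ctx.getFormats().getWktReader().read(degenerate);
    } catch (InvalidShapeException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}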
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a5665d8a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
index eb6ed5b..6ec773b 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dRptTest.java
@@ -41,7 +41,6 @@ import org.apache.lucene.spatial3d.geom.GeoPolygonFactory;
 import org.apache.lucene.spatial3d.geom.PlanetModel;
 import org.apache.lucene.spatial3d.geom.RandomGeo3dShapeGenerator;
 import org.junit.Test;
-import org.locationtech.spatial4j.context.SpatialContext;
 import org.locationtech.spatial4j.shape.Rectangle;
 import org.locationtech.spatial4j.shape.Shape;
 
@@ -164,44 +163,4 @@ public class Geo3dRptTest extends RandomSpatialOpStrategyTestCase {
     }
     testOperation(SpatialOperation.Intersects, indexedShapes, queryShapes, random().nextBoolean());
   }
-
-  //TODO move to a new test class?
-  @Test
-  public void testWKT() throws Exception {
-    Geo3dSpatialContextFactory factory = new Geo3dSpatialContextFactory();
-    SpatialContext ctx = factory.newSpatialContext();
-    String wkt = "POLYGON ((20.0 -60.4, 20.1 -60.4, 20.1 -60.3, 20.0  -60.3,20.0 -60.4))";
-    Shape s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "POINT (30 10)";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "LINESTRING (30 10, 10 30, 40 40)";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "MULTIPOINT ((10 40), (40 30), (20 20), (30 10))";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "MULTILINESTRING ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)))";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "GEOMETRYCOLLECTION(POINT(4 6),LINESTRING(4 6,7 10))";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "ENVELOPE(1, 2, 4, 3)";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    wkt = "BUFFER(POINT(-10 30), 5.2)";
-    s = ctx.getFormats().getWktReader().read(wkt);
-    assertTrue(s instanceof  Geo3dShape<?>);
-    //wkt = "BUFFER(LINESTRING(1 2, 3 4), 0.5)";
-    //s = ctx.getFormats().getWktReader().read(wkt);
-    //assertTrue(s instanceof  Geo3dShape<?>);
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a5665d8a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dTest.java
new file mode 100644
index 0000000..89cb973
--- /dev/null
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/Geo3dTest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.spatial.spatial4j;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.exception.InvalidShapeException;
+import org.locationtech.spatial4j.shape.Shape;
+
+public class Geo3dTest extends LuceneTestCase {
+
+  @Test
+  public void testWKT() throws Exception {
+    Geo3dSpatialContextFactory factory = new Geo3dSpatialContextFactory();
+    SpatialContext ctx = factory.newSpatialContext();
+    String wkt = "POLYGON ((20.0 -60.4, 20.1 -60.4, 20.1 -60.3, 20.0  -60.3,20.0 -60.4))";
+    Shape s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "POINT (30 10)";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "LINESTRING (30 10, 10 30, 40 40)";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "MULTIPOINT ((10 40), (40 30), (20 20), (30 10))";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "MULTILINESTRING ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)))";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "GEOMETRYCOLLECTION(POINT(4 6),LINESTRING(4 6,7 10))";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "ENVELOPE(1, 2, 4, 3)";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    wkt = "BUFFER(POINT(-10 30), 5.2)";
+    s = ctx.getFormats().getWktReader().read(wkt);
+    assertTrue(s instanceof  Geo3dShape<?>);
+    //wkt = "BUFFER(LINESTRING(1 2, 3 4), 0.5)";
+    //s = ctx.getFormats().getWktReader().read(wkt);
+    //assertTrue(s instanceof  Geo3dShape<?>);
+  }
+
+  @Test
+  public void testPolygonWithCoplanarPoints() {
+    Geo3dSpatialContextFactory factory = new Geo3dSpatialContextFactory();
+    SpatialContext ctx = factory.newSpatialContext();
+
+    final String polygon = "POLYGON ((-180 90, -180 -90, 180 -90, 180 90,-180 -90))";
+    expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(polygon));
+
+    final String polygonWithHole = "POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 20 30, 20 30))";
+    expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(polygonWithHole));
+
+    final String geometryCollection = "GEOMETRYCOLLECTION(POINT(4 6), LINESTRING(4 6,7 10), POLYGON ((-180 90, -180 -90, 180 -90, 180 90,-180 -90)))";
+    expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(geometryCollection));
+
+    final String multiPolygon = "MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),(30 20, 20 15, 20 25, 30 20)), ((180 90, 90 90, 180 90)))";
+    expectThrows(InvalidShapeException.class, () -> ctx.getFormats().getWktReader().read(multiPolygon));
+
+  }
+}


[48/50] [abbrv] lucene-solr:jira/http2: SOLR-12877: avoid NPE in TestTlogReplica.testRealTimeGet

Posted by da...@apache.org.
SOLR-12877: avoid NPE in TestTlogReplica.testRealTimeGet


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/15002eba
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/15002eba
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/15002eba

Branch: refs/heads/jira/http2
Commit: 15002eba2f7b90d7323dc6a9277c61e75aea5004
Parents: 95af8d6
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue Oct 16 12:52:51 2018 -0400
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue Oct 16 12:52:51 2018 -0400

----------------------------------------------------------------------
 solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/15002eba/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
index 6888d88..8e66b1e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
@@ -342,6 +342,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
         client.add(new SolrInputDocument("id", String.valueOf(id), "foo_s", "bar"));
       }
       SolrDocument docCloudClient = cluster.getSolrClient().getById(collectionName, String.valueOf(id));
+      assertNotNull(docCloudClient);
       assertEquals("bar", docCloudClient.getFieldValue("foo_s"));
       for (Replica rGet:slice.getReplicas()) {
         try (HttpSolrClient client = getHttpSolrClient(rGet.getCoreUrl(), httpClient)) {


[11/50] [abbrv] lucene-solr:jira/http2: SOLR-11812: Remove backward compatibility of old LIR implementation in 8.0

Posted by da...@apache.org.
SOLR-11812: Remove backward compatibility of old LIR implementation in 8.0


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a37a2139
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a37a2139
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a37a2139

Branch: refs/heads/jira/http2
Commit: a37a21397564d747e0ac0f5292da1904b989a526
Parents: 184ed88
Author: Cao Manh Dat <da...@apache.org>
Authored: Tue Oct 9 16:20:49 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Tue Oct 9 16:20:49 2018 +0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   2 +
 .../org/apache/solr/cloud/ElectionContext.java  | 116 -----
 .../cloud/LeaderInitiatedRecoveryThread.java    | 366 --------------
 .../org/apache/solr/cloud/ZkController.java     | 354 +-------------
 .../solr/handler/admin/CollectionsHandler.java  |   9 -
 .../processor/DistributedUpdateProcessor.java   |  28 +-
 .../apache/solr/cloud/DeleteReplicaTest.java    |  13 -
 .../org/apache/solr/cloud/ForceLeaderTest.java  | 190 --------
 .../apache/solr/cloud/HttpPartitionTest.java    |  58 ---
 .../solr/cloud/LIRRollingUpdatesTest.java       | 473 -------------------
 .../TestLeaderInitiatedRecoveryThread.java      | 242 ----------
 .../org/apache/solr/cloud/ZkShardTermsTest.java |   3 +-
 12 files changed, 14 insertions(+), 1840 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 49e425d..9ca6cf2 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -89,6 +89,8 @@ Other Changes
 
 * LUCENE-8513: SlowCompositeReaderWrapper now uses MultiTerms directly instead of MultiFields (David Smiley)
 
+* SOLR-11812: Remove backward compatibility of old LIR implementation in 8.0 (Cao Manh Dat)
+
 ==================  7.6.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
index 8d7012a..a698f83 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
@@ -455,8 +455,6 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
       boolean isLeader = true;
       if (!isClosed) {
         try {
-          // we must check LIR before registering as leader
-          checkLIR(coreName, allReplicasInLine);
           if (replicaType == Replica.Type.TLOG) {
             // stop replicate from old leader
             zkController.stopReplicationFromLeader(coreName);
@@ -509,16 +507,6 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
             rejoinLeaderElection(core);
           }
         }
-
-        if (isLeader) {
-          // check for any replicas in my shard that were set to down by the previous leader
-          try {
-            startLeaderInitiatedRecoveryOnReplicas(coreName);
-          } catch (Exception exc) {
-            // don't want leader election to fail because of
-            // an error trying to tell others to recover
-          }
-        }
       } else {
         cancelElection();
       }
@@ -595,110 +583,6 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
     return docCollection.getReplica(replicaName);
   }
 
-  @Deprecated
-  public void checkLIR(String coreName, boolean allReplicasInLine)
-      throws InterruptedException, KeeperException, IOException {
-    if (allReplicasInLine) {
-      log.info("Found all replicas participating in election, clear LIR");
-      // SOLR-8075: A bug may allow the proper leader to get marked as LIR DOWN and
-      // if we are marked as DOWN but were able to become the leader, we remove
-      // the DOWN entry here so that we don't fail publishing ACTIVE due to being in LIR.
-      // We only do this if all the replicas participated in the election just in case
-      // this was a valid LIR entry and the proper leader replica is missing.
-      try (SolrCore core = cc.getCore(coreName)) {
-        final Replica.State lirState = zkController.getLeaderInitiatedRecoveryState(collection, shardId,
-            core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
-        if (lirState == Replica.State.DOWN) {
-          // We can do this before registering as leader because only setting DOWN requires that
-          // we are already registered as leader, and here we are setting ACTIVE
-          // The fact that we just won the zk leader election provides a quasi lock on setting this state, but
-          // we should improve this: see SOLR-8075 discussion
-          zkController.updateLeaderInitiatedRecoveryState(collection, shardId,
-              leaderProps.getStr(ZkStateReader.CORE_NODE_NAME_PROP), Replica.State.ACTIVE, core.getCoreDescriptor(), true);
-        }
-      }
-
-    } else {
-      try (SolrCore core = cc.getCore(coreName)) {
-        if (core != null) {
-          final Replica.State lirState = zkController.getLeaderInitiatedRecoveryState(collection, shardId,
-              core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
-          if (lirState == Replica.State.DOWN || lirState == Replica.State.RECOVERING) {
-            log.warn("The previous leader marked me " + core.getName()
-                + " as " + lirState.toString() + " and I haven't recovered yet, so I shouldn't be the leader.");
-
-            throw new SolrException(ErrorCode.SERVER_ERROR, "Leader Initiated Recovery prevented leadership");
-          }
-        }
-      }
-    }
-  }
-
-  @Deprecated
-  private void startLeaderInitiatedRecoveryOnReplicas(String coreName) throws Exception {
-    try (SolrCore core = cc.getCore(coreName)) {
-      CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
-      String coll = cloudDesc.getCollectionName();
-      String shardId = cloudDesc.getShardId();
-      String coreNodeName = cloudDesc.getCoreNodeName();
-
-      if (coll == null || shardId == null) {
-        log.error("Cannot start leader-initiated recovery on new leader (core="+
-            coreName+",coreNodeName=" + coreNodeName + ") because collection and/or shard is null!");
-        return;
-      }
-
-      String znodePath = zkController.getLeaderInitiatedRecoveryZnodePath(coll, shardId);
-      List<String> replicas = null;
-      try {
-        replicas = zkClient.getChildren(znodePath, null, false);
-      } catch (NoNodeException nne) {
-        // this can be ignored
-      }
-
-      if (replicas != null && replicas.size() > 0) {
-        // set of replicas which is running in new LIR but lirState=DOWN
-        Set<String> replicasMustBeInLowerTerm = new HashSet<>();
-        for (String replicaCoreNodeName : replicas) {
-
-          if (coreNodeName.equals(replicaCoreNodeName))
-            continue; // added safe-guard so we don't mark this core as down
-
-          final Replica.State lirState = zkController.getLeaderInitiatedRecoveryState(coll, shardId, replicaCoreNodeName);
-          if (lirState == Replica.State.DOWN || lirState == Replica.State.RECOVERY_FAILED) {
-            log.info("After core={} coreNodeName={} was elected leader, a replica coreNodeName={} was found in state: "
-                + lirState.toString() + " and needing recovery.", coreName, coreNodeName, replicaCoreNodeName);
-            List<Replica> replicasProps =
-                zkController.getZkStateReader().getClusterState().getCollection(collection)
-                    .getSlice(shardId).getReplicas(EnumSet.of(Replica.Type.NRT, Replica.Type.TLOG));
-
-            if (replicasProps != null && replicasProps.size() > 0) {
-              ZkCoreNodeProps coreNodeProps = null;
-              for (Replica p : replicasProps) {
-                if (p.getName().equals(replicaCoreNodeName)) {
-                  coreNodeProps = new ZkCoreNodeProps(p);
-                  break;
-                }
-              }
-
-              if (zkController.getShardTerms(collection, shardId).registered(replicaCoreNodeName)) {
-                replicasMustBeInLowerTerm.add(replicaCoreNodeName);
-              } else {
-                zkController.ensureReplicaInLeaderInitiatedRecovery(cc,
-                    collection, shardId, coreNodeProps, core.getCoreDescriptor(),
-                    false /* forcePublishState */);
-              }
-            }
-          }
-        }
-        // these replicas registered their terms so it is running with the new LIR implementation
-        // we can put this replica into recovery by increase our terms
-        zkController.getShardTerms(collection, shardId).ensureTermsIsHigher(coreNodeName, replicasMustBeInLowerTerm);
-      }
-    } // core gets closed automagically
-  }
-
-
   // returns true if all replicas are found to be up, false if not
   private boolean waitForReplicasToComeUp(int timeoutms) throws InterruptedException {
     long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutms, TimeUnit.MILLISECONDS);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java b/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java
deleted file mode 100644
index 071bfbf..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.http.NoHttpResponseException;
-import org.apache.http.conn.ConnectTimeoutException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
-import org.apache.zookeeper.KeeperException;
-import org.apache.solr.util.RTimer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.invoke.MethodHandles;
-import java.net.ConnectException;
-import java.net.SocketException;
-import java.net.UnknownHostException;
-import java.util.List;
-
-/**
- * Background daemon thread that tries to send the REQUESTRECOVERY to a downed
- * replica; used by a shard leader to nag a replica into recovering after the
- * leader experiences an error trying to send an update request to the replica.
- */
-@Deprecated
-public class LeaderInitiatedRecoveryThread extends Thread {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected ZkController zkController;
-  protected CoreContainer coreContainer;
-  protected String collection;
-  protected String shardId;
-  protected ZkCoreNodeProps nodeProps;
-  protected int maxTries;
-  private CoreDescriptor leaderCd;
-  
-  public LeaderInitiatedRecoveryThread(ZkController zkController, 
-                                       CoreContainer cc, 
-                                       String collection, 
-                                       String shardId, 
-                                       ZkCoreNodeProps nodeProps,
-                                       int maxTries,
-                                       CoreDescriptor leaderCd)
-  {
-    super("LeaderInitiatedRecoveryThread-"+nodeProps.getCoreName());
-    this.zkController = zkController;
-    this.coreContainer = cc;
-    this.collection = collection;
-    this.shardId = shardId;    
-    this.nodeProps = nodeProps;
-    this.maxTries = maxTries;
-    this.leaderCd = leaderCd;
-    setDaemon(true);
-  }
-  
-  public void run() {
-    RTimer timer = new RTimer();
-
-    String replicaCoreName = nodeProps.getCoreName();
-    String replicaCoreNodeName = ((Replica) nodeProps.getNodeProps()).getName();
-    String replicaNodeName = nodeProps.getNodeName();
-    final String replicaUrl = nodeProps.getCoreUrl();
-
-    if (!zkController.isReplicaInRecoveryHandling(replicaUrl)) {
-      throw new SolrException(ErrorCode.INVALID_STATE, "Replica: " + replicaUrl + " should have been marked under leader initiated recovery in ZkController but wasn't.");
-    }
-    if (!CloudUtil.replicaExists(zkController.getClusterState(), collection, shardId, replicaCoreNodeName)) {
-      log.info("Replica does not exist, skip doing LIR");
-    }
-    boolean sendRecoveryCommand = publishDownState(replicaCoreName, replicaCoreNodeName, replicaNodeName, replicaUrl, false);
-
-    if (sendRecoveryCommand)  {
-      try {
-        sendRecoveryCommandWithRetry();
-      } catch (Exception exc) {
-        log.error(getName()+" failed due to: "+exc, exc);
-        if (exc instanceof SolrException) {
-          throw (SolrException)exc;
-        } else {
-          throw new SolrException(ErrorCode.SERVER_ERROR, exc);
-        }
-      } finally {
-        zkController.removeReplicaFromLeaderInitiatedRecoveryHandling(replicaUrl);
-      }
-    } else  {
-      // replica is no longer in recovery on this node (may be handled on another node)
-      zkController.removeReplicaFromLeaderInitiatedRecoveryHandling(replicaUrl);
-    }
-    log.info("{} completed successfully after running for {}ms", getName(), timer.getTime());
-  }
-
-  public boolean publishDownState(String replicaCoreName, String replicaCoreNodeName, String replicaNodeName, String replicaUrl, boolean forcePublishState) {
-    boolean sendRecoveryCommand = true;
-    boolean publishDownState = false;
-
-    if (zkController.getZkStateReader().getClusterState().liveNodesContain(replicaNodeName)) {
-      try {
-        // create a znode that the replica needs to "ack" to verify it knows it was out-of-sync
-        updateLIRState(replicaCoreNodeName);
-
-        log.info("Put replica core={} coreNodeName={} on " +
-            replicaNodeName + " into leader-initiated recovery.", replicaCoreName, replicaCoreNodeName);
-        publishDownState = true;
-      } catch (Exception e) {
-        Throwable setLirZnodeFailedCause = SolrException.getRootCause(e);
-        log.error("Leader failed to set replica " +
-            nodeProps.getCoreUrl() + " state to DOWN due to: " + setLirZnodeFailedCause, setLirZnodeFailedCause);
-        if (setLirZnodeFailedCause instanceof KeeperException.SessionExpiredException
-            || setLirZnodeFailedCause instanceof KeeperException.ConnectionLossException
-            || setLirZnodeFailedCause instanceof ZkController.NotLeaderException) {
-          // our session is expired, which means our state is suspect, so don't go
-          // putting other replicas in recovery (see SOLR-6511)
-          sendRecoveryCommand = false;
-          forcePublishState = false; // no need to force publish any state in this case
-        } // else will go ahead and try to send the recovery command once after this error
-      }
-    } else  {
-      log.info("Node " + replicaNodeName +
-              " is not live, so skipping leader-initiated recovery for replica: core={} coreNodeName={}",
-          replicaCoreName, replicaCoreNodeName);
-      // publishDownState will be false to avoid publishing the "down" state too many times
-      // as many errors can occur together and will each call into this method (SOLR-6189)
-      forcePublishState = false; // no need to force publish the state because replica is not live
-      sendRecoveryCommand = false; // no need to send recovery messages as well
-    }
-
-    try {
-      if (publishDownState || forcePublishState) {
-        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, nodeProps.getBaseUrl(),
-            ZkStateReader.CORE_NAME_PROP, nodeProps.getCoreName(),
-            ZkStateReader.CORE_NODE_NAME_PROP, replicaCoreNodeName,
-            ZkStateReader.NODE_NAME_PROP, nodeProps.getNodeName(),
-            ZkStateReader.SHARD_ID_PROP, shardId,
-            ZkStateReader.COLLECTION_PROP, collection,
-            ZkStateReader.FORCE_SET_STATE_PROP, "false");
-        log.warn("Leader is publishing core={} coreNodeName ={} state={} on behalf of un-reachable replica {}",
-            replicaCoreName, replicaCoreNodeName, Replica.State.DOWN.toString(), replicaUrl);
-        zkController.getOverseerJobQueue().offer(Utils.toJSON(m));
-      }
-    } catch (Exception e) {
-      log.error("Could not publish 'down' state for replicaUrl: {}", replicaUrl, e);
-    }
-
-    return sendRecoveryCommand;
-  }
-
-  private void removeLIRState(String replicaCoreNodeName) {
-    zkController.updateLeaderInitiatedRecoveryState(collection,
-        shardId,
-        replicaCoreNodeName, Replica.State.ACTIVE, leaderCd, true);
-  }
-
-  /*
-  protected scope for testing purposes
-   */
-  protected void updateLIRState(String replicaCoreNodeName) {
-    zkController.updateLeaderInitiatedRecoveryState(collection,
-        shardId,
-        replicaCoreNodeName, Replica.State.DOWN, leaderCd, true);
-  }
-
-  protected void sendRecoveryCommandWithRetry() throws Exception {    
-    int tries = 0;
-    long waitBetweenTriesMs = 5000L;
-    boolean continueTrying = true;
-
-    String replicaCoreName = nodeProps.getCoreName();
-    String recoveryUrl = nodeProps.getBaseUrl();
-    String replicaNodeName = nodeProps.getNodeName();
-    String coreNeedingRecovery = nodeProps.getCoreName();
-    String replicaCoreNodeName = ((Replica) nodeProps.getNodeProps()).getName();
-    String replicaUrl = nodeProps.getCoreUrl();
-    
-    log.info(getName()+" started running to send REQUESTRECOVERY command to "+replicaUrl+
-        "; will try for a max of "+(maxTries * (waitBetweenTriesMs/1000))+" secs");
-
-    RequestRecovery recoverRequestCmd = new RequestRecovery();
-    recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
-    recoverRequestCmd.setCoreName(coreNeedingRecovery);
-    
-    while (continueTrying && ++tries <= maxTries) {
-      if (tries > 1) {
-        log.warn("Asking core={} coreNodeName={} on " + recoveryUrl +
-            " to recover; unsuccessful after "+tries+" of "+maxTries+" attempts so far ...", coreNeedingRecovery, replicaCoreNodeName);
-      } else {
-        log.info("Asking core={} coreNodeName={} on " + recoveryUrl + " to recover", coreNeedingRecovery, replicaCoreNodeName);
-      }
-
-      try (HttpSolrClient client = new HttpSolrClient.Builder(recoveryUrl)
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        try {
-          client.request(recoverRequestCmd);
-          
-          log.info("Successfully sent " + CoreAdminAction.REQUESTRECOVERY +
-              " command to core={} coreNodeName={} on " + recoveryUrl, coreNeedingRecovery, replicaCoreNodeName);
-          
-          continueTrying = false; // succeeded, so stop looping
-        } catch (Exception t) {
-          Throwable rootCause = SolrException.getRootCause(t);
-          boolean wasCommError =
-              (rootCause instanceof ConnectException ||
-                  rootCause instanceof ConnectTimeoutException ||
-                  rootCause instanceof NoHttpResponseException ||
-                  rootCause instanceof SocketException ||
-                  rootCause instanceof UnknownHostException);
-
-          if (!wasCommError) {
-            continueTrying = false;
-          }
-
-          if (rootCause.getMessage().contains("Unable to locate core")) {
-            log.info("Replica {} is removed, hence remove its lir state", replicaCoreNodeName);
-            removeLIRState(replicaCoreNodeName);
-            break;
-          } else {
-            SolrException.log(log, recoveryUrl + ": Could not tell a replica to recover, wasCommError:"+wasCommError, t);
-          }
-        }
-      }
-      
-      // wait a few seconds
-      if (continueTrying) {
-        try {
-          Thread.sleep(waitBetweenTriesMs);
-        } catch (InterruptedException ignoreMe) {
-          Thread.currentThread().interrupt();          
-        }
-        
-        if (coreContainer.isShutDown()) {
-          log.warn("Stop trying to send recovery command to downed replica core={} coreNodeName={} on "
-              + replicaNodeName + " because my core container is closed.", coreNeedingRecovery, replicaCoreNodeName);
-          continueTrying = false;
-          break;
-        }
-        
-        // see if the replica's node is still live, if not, no need to keep doing this loop
-        ZkStateReader zkStateReader = zkController.getZkStateReader();
-        if (!zkStateReader.getClusterState().liveNodesContain(replicaNodeName)) {
-          log.warn("Node "+replicaNodeName+" hosting core "+coreNeedingRecovery+
-              " is no longer live. No need to keep trying to tell it to recover!");
-          continueTrying = false;
-          break;
-        }
-
-        String leaderCoreNodeName = leaderCd.getCloudDescriptor().getCoreNodeName();
-        // stop trying if I'm no longer the leader
-        if (leaderCoreNodeName != null && collection != null) {
-          String leaderCoreNodeNameFromZk = null;
-          try {
-            leaderCoreNodeNameFromZk = zkController.getZkStateReader().getLeaderRetry(collection, shardId, 1000).getName();
-          } catch (Exception exc) {
-            log.error("Failed to determine if " + leaderCoreNodeName + " is still the leader for " + collection +
-                " " + shardId + " before starting leader-initiated recovery thread for " + replicaUrl + " due to: " + exc);
-          }
-          if (!leaderCoreNodeName.equals(leaderCoreNodeNameFromZk)) {
-            log.warn("Stop trying to send recovery command to downed replica core=" + coreNeedingRecovery +
-                ",coreNodeName=" + replicaCoreNodeName + " on " + replicaNodeName + " because " +
-                leaderCoreNodeName + " is no longer the leader! New leader is " + leaderCoreNodeNameFromZk);
-            continueTrying = false;
-            break;
-          }
-          if (!leaderCd.getCloudDescriptor().isLeader()) {
-            log.warn("Stop trying to send recovery command to downed replica core=" + coreNeedingRecovery +
-                ",coreNodeName=" + replicaCoreNodeName + " on " + replicaNodeName + " because " +
-                leaderCoreNodeName + " is no longer the leader!");
-            continueTrying = false;
-            break;
-          }
-        }
-
-        // additional safeguard against the replica trying to be in the active state
-        // before acknowledging the leader initiated recovery command
-        if (collection != null && shardId != null) {
-          try {
-            // call out to ZooKeeper to get the leader-initiated recovery state
-            final Replica.State lirState = zkController.getLeaderInitiatedRecoveryState(collection, shardId, replicaCoreNodeName);
-            
-            if (lirState == null) {
-              log.warn("Stop trying to send recovery command to downed replica core="+coreNeedingRecovery+
-                  ",coreNodeName=" + replicaCoreNodeName + " on "+replicaNodeName+" because the znode no longer exists.");
-              continueTrying = false;
-              break;              
-            }
-            
-            if (lirState == Replica.State.RECOVERING) {
-              // replica has ack'd leader initiated recovery and entered the recovering state
-              // so we don't need to keep looping to send the command
-              continueTrying = false;  
-              log.info("Replica "+coreNeedingRecovery+
-                  " on node "+replicaNodeName+" ack'd the leader initiated recovery state, "
-                      + "no need to keep trying to send recovery command");
-            } else {
-              String lcnn = zkStateReader.getLeaderRetry(collection, shardId, 5000).getName();
-              List<ZkCoreNodeProps> replicaProps = 
-                  zkStateReader.getReplicaProps(collection, shardId, lcnn);
-              if (replicaProps != null && replicaProps.size() > 0) {
-                for (ZkCoreNodeProps prop : replicaProps) {
-                  final Replica replica = (Replica) prop.getNodeProps();
-                  if (replicaCoreNodeName.equals(replica.getName())) {
-                    if (replica.getState() == Replica.State.ACTIVE) {
-                      // replica published its state as "active",
-                      // which is bad if lirState is still "down"
-                      if (lirState == Replica.State.DOWN) {
-                        // OK, so the replica thinks it is active, but it never ack'd the leader initiated recovery
-                        // so its state cannot be trusted and it needs to be told to recover again ... and we keep looping here
-                        log.warn("Replica core={} coreNodeName={} set to active but the leader thinks it should be in recovery;"
-                            + " forcing it back to down state to re-run the leader-initiated recovery process; props: " + replicaProps.get(0), coreNeedingRecovery, replicaCoreNodeName);
-                        publishDownState(replicaCoreName, replicaCoreNodeName, replicaNodeName, replicaUrl, true);
-                      }
-                    }
-                    break;
-                  }
-                }
-              }
-            }                  
-          } catch (Exception ignoreMe) {
-            log.warn("Failed to determine state of core={} coreNodeName={} due to: "+ignoreMe, coreNeedingRecovery, replicaCoreNodeName);
-            // eventually this loop will exhaust max tries and stop so we can just log this for now
-          }                
-        }
-      }
-    }
-    
-    // replica is no longer in recovery on this node (may be handled on another node)
-    zkController.removeReplicaFromLeaderInitiatedRecoveryHandling(replicaUrl);
-    
-    if (continueTrying) {
-      // ugh! this means the loop timed out before the recovery command could be delivered
-      // how exotic do we want to get here?
-      log.error("Timed out after waiting for "+(tries * (waitBetweenTriesMs/1000))+
-          " secs to send the recovery request to: "+replicaUrl+"; not much more we can do here?");
-      
-      // TODO: need to raise a JMX event to allow monitoring tools to take over from here
-      
-    }    
-  }
-}
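
The class deleted above boiled down to a bounded retry loop: build a REQUESTRECOVERY core-admin command and keep re-sending it to the downed replica's node until it is accepted or the attempt budget runs out. Below is a minimal sketch of that pattern, using only the SolrJ calls visible in the deleted code; baseUrl, coreName and maxTries are placeholder parameters, not part of this patch.

    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
    import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;

    void sendRecoveryCommandWithRetry(String baseUrl, String coreName, int maxTries) throws Exception {
      RequestRecovery recoverRequestCmd = new RequestRecovery();
      recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
      recoverRequestCmd.setCoreName(coreName);

      int tries = 0;
      boolean continueTrying = true;
      while (continueTrying && ++tries <= maxTries) {
        try (HttpSolrClient client = new HttpSolrClient.Builder(baseUrl)
            .withConnectionTimeout(15000)
            .withSocketTimeout(60000)
            .build()) {
          client.request(recoverRequestCmd);  // the replica's node accepted the recovery command
          continueTrying = false;             // delivered, stop looping
        } catch (Exception e) {
          // communication error: wait and retry (the real thread also re-checked node
          // liveness, leadership and the LIR znode before each attempt)
          Thread.sleep(5000L);
        }
      }
    }
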

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/java/org/apache/solr/cloud/ZkController.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index d5de5dd..be5a3f4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -1115,9 +1115,7 @@ public class ZkController {
 
       ZkShardTerms shardTerms = getShardTerms(collection, cloudDesc.getShardId());
 
-      // This flag is used for testing rolling updates and should be removed in SOLR-11812
-      boolean isRunningInNewLIR = "new".equals(desc.getCoreProperty("lirVersion", "new"));
-      if (isRunningInNewLIR && replica.getType() != Type.PULL) {
+      if (replica.getType() != Type.PULL) {
         shardTerms.registerTerm(coreZkNodeName);
       }
 
@@ -1196,7 +1194,7 @@ public class ZkController {
           publish(desc, Replica.State.ACTIVE);
         }
 
-        if (isRunningInNewLIR && replica.getType() != Type.PULL) {
+        if (replica.getType() != Type.PULL) {
          // the watcher is added to a set so multiple calls of this method will leave only one watcher
           shardTerms.addListener(new RecoveringCoreTermWatcher(core.getCoreDescriptor(), getCoreContainer()));
         }
@@ -1406,15 +1404,6 @@ public class ZkController {
         return true;
       }
 
-      // see if the leader told us to recover
-      final Replica.State lirState = getLeaderInitiatedRecoveryState(collection, shardId,
-          core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
-      if (lirState == Replica.State.DOWN) {
-        log.info("Leader marked core " + core.getName() + " down; starting recovery process");
-        core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
-        return true;
-      }
-
       ZkShardTerms zkShardTerms = getShardTerms(collection, shardId);
       if (zkShardTerms.registered(coreZkNodeName) && !zkShardTerms.canBecomeLeader(coreZkNodeName)) {
         log.info("Leader's term larger than core " + core.getName() + "; starting recovery process");
@@ -1468,29 +1457,6 @@ public class ZkController {
       
       String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
 
-      // If the leader initiated recovery, then verify that this replica has performed
-      // recovery as requested before becoming active; don't even look at lirState if going down
-      if (state != Replica.State.DOWN) {
-        final Replica.State lirState = getLeaderInitiatedRecoveryState(collection, shardId, coreNodeName);
-        if (lirState != null) {
-          assert cd.getCloudDescriptor().getReplicaType() != Replica.Type.PULL: "LIR should not happen for pull replicas!";
-          if (state == Replica.State.ACTIVE) {
-            // trying to become active, so leader-initiated state must be recovering
-            if (lirState == Replica.State.RECOVERING) {
-              updateLeaderInitiatedRecoveryState(collection, shardId, coreNodeName, Replica.State.ACTIVE, cd, true);
-            } else if (lirState == Replica.State.DOWN) {
-              throw new SolrException(ErrorCode.INVALID_STATE,
-                  "Cannot publish state of core '" + cd.getName() + "' as active without recovering first!");
-            }
-          } else if (state == Replica.State.RECOVERING) {
-            // if it is currently DOWN, then trying to enter into recovering state is good
-            if (lirState == Replica.State.DOWN) {
-              updateLeaderInitiatedRecoveryState(collection, shardId, coreNodeName, Replica.State.RECOVERING, cd, true);
-            }
-          }
-        }
-      }
-
       Map<String,Object> props = new HashMap<>();
       props.put(Overseer.QUEUE_OPERATION, "state");
       props.put(ZkStateReader.STATE_PROP, state.toString());
@@ -1529,15 +1495,13 @@ public class ZkController {
         log.info("The core '{}' had failed to initialize before.", cd.getName());
       }
 
-      // This flag is used for testing rolling updates and should be removed in SOLR-11812
-      boolean isRunningInNewLIR = "new".equals(cd.getCoreProperty("lirVersion", "new"));
       // pull replicas are excluded because their terms are not considered
-      if (state == Replica.State.RECOVERING && isRunningInNewLIR && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
+      if (state == Replica.State.RECOVERING && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
         // state is used by the client; a replica's state can change from RECOVERING to DOWN without needing to finish recovery
         // by calling this we will know whether a replica actually finished recovery or not
         getShardTerms(collection, shardId).startRecovering(coreNodeName);
       }
-      if (state == Replica.State.ACTIVE && isRunningInNewLIR && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
+      if (state == Replica.State.ACTIVE && cd.getCloudDescriptor().getReplicaType() != Type.PULL) {
         getShardTerms(collection, shardId).doneRecovering(coreNodeName);
       }
 
@@ -1857,24 +1821,12 @@ public class ZkController {
 
     boolean isLeader = leaderProps.getCoreUrl().equals(ourUrl);
     if (!isLeader && !SKIP_AUTO_RECOVERY) {
-
-      // detect if this core is in leader-initiated recovery and if so,
-      // then we don't need the leader to wait on seeing the down state
-      Replica.State lirState = null;
-      try {
-        lirState = getLeaderInitiatedRecoveryState(collection, shard, myCoreNodeName);
-      } catch (Exception exc) {
-        log.error("Failed to determine if replica " + myCoreNodeName +
-            " is in leader-initiated recovery due to: " + exc, exc);
-      }
-
-      if (lirState != null || !getShardTerms(collection, shard).canBecomeLeader(myCoreNodeName)) {
+      if (!getShardTerms(collection, shard).canBecomeLeader(myCoreNodeName)) {
         log.debug("Term of replica " + myCoreNodeName +
             " is already less than leader, so not waiting for leader to see down state.");
       } else {
 
-        log.info("Replica " + myCoreNodeName +
-            " NOT in leader-initiated recovery, need to wait for leader to see down state.");
+        log.info("Replica needs to wait for leader to see down state.");
 
         try (HttpSolrClient client = new Builder(leaderBaseUrl)
             .withConnectionTimeout(15000)
@@ -2211,291 +2163,6 @@ public class ZkController {
     return cc;
   }
 
-  /**
-   * When a leader receives a communication error when trying to send a request to a replica,
-   * it calls this method to ensure the replica enters recovery when connectivity is restored.
-   * <p>
-   * returns true if the node hosting the replica is still considered "live" by ZooKeeper;
-   * false means the node is not live either, so no point in trying to send recovery commands
-   * to it.
-   */
-  @Deprecated
-  public boolean ensureReplicaInLeaderInitiatedRecovery(
-      final CoreContainer container,
-      final String collection, final String shardId, final ZkCoreNodeProps replicaCoreProps,
-      CoreDescriptor leaderCd, boolean forcePublishState)
-      throws KeeperException, InterruptedException {
-    final String replicaUrl = replicaCoreProps.getCoreUrl();
-
-    if (collection == null)
-      throw new IllegalArgumentException("collection parameter cannot be null for starting leader-initiated recovery for replica: " + replicaUrl);
-
-    if (shardId == null)
-      throw new IllegalArgumentException("shard parameter cannot be null for starting leader-initiated recovery for replica: " + replicaUrl);
-
-    if (replicaUrl == null)
-      throw new IllegalArgumentException("replicaUrl parameter cannot be null for starting leader-initiated recovery");
-
-    // First, determine if this replica is already in recovery handling
-    // which is needed because there can be many concurrent errors flooding in
-    // about the same replica having trouble and we only need to send the "needs"
-    // recovery signal once
-    boolean nodeIsLive = true;
-    String replicaNodeName = replicaCoreProps.getNodeName();
-    String replicaCoreNodeName = ((Replica) replicaCoreProps.getNodeProps()).getName();
-    assert replicaCoreNodeName != null : "No core name for replica " + replicaNodeName;
-    synchronized (replicasInLeaderInitiatedRecovery) {
-      if (replicasInLeaderInitiatedRecovery.containsKey(replicaUrl)) {
-        if (!forcePublishState) {
-          log.debug("Replica {} already in leader-initiated recovery handling.", replicaUrl);
-          return false; // already in this recovery process
-        }
-      }
-
-      // we only really need to try to start the LIR process if the node itself is "live"
-      if (getZkStateReader().getClusterState().liveNodesContain(replicaNodeName)
-          && CloudUtil.replicaExists(getZkStateReader().getClusterState(), collection, shardId, replicaCoreNodeName)) {
-
-        LeaderInitiatedRecoveryThread lirThread =
-            new LeaderInitiatedRecoveryThread(this,
-                container,
-                collection,
-                shardId,
-                replicaCoreProps,
-                120,
-                leaderCd);
-        ExecutorService executor = container.getUpdateShardHandler().getUpdateExecutor();
-        try {
-          MDC.put("DistributedUpdateProcessor.replicaUrlToRecover", replicaCoreProps.getCoreUrl());
-          executor.execute(lirThread);
-        } finally {
-          MDC.remove("DistributedUpdateProcessor.replicaUrlToRecover");
-        }
-
-        // create a znode that the replica needs to "ack" to verify it knows it was out-of-sync
-        replicasInLeaderInitiatedRecovery.put(replicaUrl,
-            getLeaderInitiatedRecoveryZnodePath(collection, shardId, replicaCoreNodeName));
-        log.info("Put replica core={} coreNodeName={} on " +
-            replicaNodeName + " into leader-initiated recovery.", replicaCoreProps.getCoreName(), replicaCoreNodeName);
-      } else {
-        nodeIsLive = false; // we really don't need to send the recovery request if the node is NOT live
-        log.info("Node {} is not live or replica {} is deleted, so skipping leader-initiated recovery for replica: core={}",
-            replicaNodeName, replicaCoreNodeName, replicaCoreProps.getCoreName());
-        // publishDownState will be false to avoid publishing the "down" state too many times
-        // as many errors can occur together and will each call into this method (SOLR-6189)
-      }
-    }
-
-    return nodeIsLive;
-  }
-
-  @Deprecated
-  public boolean isReplicaInRecoveryHandling(String replicaUrl) {
-    boolean exists = false;
-    synchronized (replicasInLeaderInitiatedRecovery) {
-      exists = replicasInLeaderInitiatedRecovery.containsKey(replicaUrl);
-    }
-    return exists;
-  }
-
-  @Deprecated
-  public void removeReplicaFromLeaderInitiatedRecoveryHandling(String replicaUrl) {
-    synchronized (replicasInLeaderInitiatedRecovery) {
-      replicasInLeaderInitiatedRecovery.remove(replicaUrl);
-    }
-  }
-
-  @Deprecated
-  public Replica.State getLeaderInitiatedRecoveryState(String collection, String shardId, String coreNodeName) {
-    final Map<String, Object> stateObj = getLeaderInitiatedRecoveryStateObject(collection, shardId, coreNodeName);
-    if (stateObj == null) {
-      return null;
-    }
-    final String stateStr = (String) stateObj.get(ZkStateReader.STATE_PROP);
-    return stateStr == null ? null : Replica.State.getState(stateStr);
-  }
-
-  @Deprecated
-  public Map<String, Object> getLeaderInitiatedRecoveryStateObject(String collection, String shardId, String coreNodeName) {
-
-    if (collection == null || shardId == null || coreNodeName == null)
-      return null; // if we don't have complete data about a core in cloud mode, return null
-
-    String znodePath = getLeaderInitiatedRecoveryZnodePath(collection, shardId, coreNodeName);
-    byte[] stateData = null;
-    try {
-      stateData = zkClient.getData(znodePath, null, new Stat(), false);
-    } catch (NoNodeException ignoreMe) {
-      // safe to ignore as this znode will only exist if the leader initiated recovery
-    } catch (ConnectionLossException | SessionExpiredException cle) {
-      // sort of safe to ignore ??? Usually these are seen when the core is going down
-      // or there are bigger issues to deal with than reading this znode
-      log.warn("Unable to read " + znodePath + " due to: " + cle);
-    } catch (Exception exc) {
-      log.error("Failed to read data from znode " + znodePath + " due to: " + exc);
-      if (exc instanceof SolrException) {
-        throw (SolrException) exc;
-      } else {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "Failed to read data from znodePath: " + znodePath, exc);
-      }
-    }
-
-    Map<String, Object> stateObj = null;
-    if (stateData != null && stateData.length > 0) {
-      // TODO: Remove later ... this is for upgrading from 4.8.x to 4.10.3 (see: SOLR-6732)
-      if (stateData[0] == (byte) '{') {
-        Object parsedJson = Utils.fromJSON(stateData);
-        if (parsedJson instanceof Map) {
-          stateObj = (Map<String, Object>) parsedJson;
-        } else {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Leader-initiated recovery state data is invalid! " + parsedJson);
-        }
-      } else {
-        // old format still in ZK
-        stateObj = Utils.makeMap("state", new String(stateData, StandardCharsets.UTF_8));
-      }
-    }
-
-    return stateObj;
-  }
-
-  @Deprecated
-  public void updateLeaderInitiatedRecoveryState(String collection, String shardId, String coreNodeName,
-      Replica.State state, CoreDescriptor leaderCd, boolean retryOnConnLoss) {
-    if (collection == null || shardId == null || coreNodeName == null) {
-      log.warn("Cannot set leader-initiated recovery state znode to "
-          + state.toString() + " using: collection=" + collection
-          + "; shardId=" + shardId + "; coreNodeName=" + coreNodeName);
-      return; // if we don't have complete data about a core in cloud mode, do nothing
-    }
-
-    assert leaderCd != null;
-    assert leaderCd.getCloudDescriptor() != null;
-
-    String leaderCoreNodeName = leaderCd.getCloudDescriptor().getCoreNodeName();
-
-    String znodePath = getLeaderInitiatedRecoveryZnodePath(collection, shardId, coreNodeName);
-
-    if (state == Replica.State.ACTIVE) {
-      // since we're marking it active, we don't need this znode anymore, so delete instead of update
-      try {
-        zkClient.delete(znodePath, -1, retryOnConnLoss);
-      } catch (Exception justLogIt) {
-        log.warn("Failed to delete znode " + znodePath, justLogIt);
-      }
-      return;
-    }
-
-    Map<String, Object> stateObj = null;
-    try {
-      stateObj = getLeaderInitiatedRecoveryStateObject(collection, shardId, coreNodeName);
-    } catch (Exception exc) {
-      log.warn(exc.getMessage(), exc);
-    }
-    if (stateObj == null) {
-      stateObj = Utils.makeMap();
-    }
-
-    stateObj.put(ZkStateReader.STATE_PROP, state.toString());
-    // only update the createdBy value if it's not set
-    if (stateObj.get("createdByNodeName") == null) {
-      stateObj.put("createdByNodeName", this.nodeName);
-    }
-    if (stateObj.get("createdByCoreNodeName") == null && leaderCoreNodeName != null)  {
-      stateObj.put("createdByCoreNodeName", leaderCoreNodeName);
-    }
-
-    byte[] znodeData = Utils.toJSON(stateObj);
-
-    try {
-      if (state == Replica.State.DOWN) {
-        markShardAsDownIfLeader(collection, shardId, leaderCd, znodePath, znodeData, retryOnConnLoss);
-      } else {
-        // must retry on conn loss otherwise future election attempts may assume wrong LIR state
-        if (zkClient.exists(znodePath, true)) {
-          zkClient.setData(znodePath, znodeData, retryOnConnLoss);
-        } else {
-          zkClient.makePath(znodePath, znodeData, retryOnConnLoss);
-        }
-      }
-      log.debug("Wrote {} to {}", state.toString(), znodePath);
-    } catch (Exception exc) {
-      if (exc instanceof SolrException) {
-        throw (SolrException) exc;
-      } else {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "Failed to update data to " + state.toString() + " for znode: " + znodePath, exc);
-      }
-    }
-  }
-
-  /**
-   * we use ZK's multi-transactional semantics to ensure that we are able to
-   * publish a replica as 'down' only if our leader election node still exists
-   * in ZK. This ensures that a long running network partition caused by GC etc
-   * doesn't let us mark a node as down *after* we've already lost our session
-   */
-  private void markShardAsDownIfLeader(String collection, String shardId, CoreDescriptor leaderCd,
-                                       String znodePath, byte[] znodeData,
-                                       boolean retryOnConnLoss) throws KeeperException, InterruptedException {
-
-
-    if (!leaderCd.getCloudDescriptor().isLeader()) {
-      log.info("No longer leader, aborting attempt to mark shard down as part of LIR");
-      throw new NotLeaderException(ErrorCode.SERVER_ERROR, "Locally, we do not think we are the leader.");
-    }
-
-    ContextKey key = new ContextKey(collection, leaderCd.getCloudDescriptor().getCoreNodeName());
-    ElectionContext context = electionContexts.get(key);
-
-    // we make sure we locally think we are the leader before and after getting the context - then
-    // we only try zk if we still think we are the leader and have our leader context
-    if (context == null || !leaderCd.getCloudDescriptor().isLeader()) {
-      log.info("No longer leader, aborting attempt to mark shard down as part of LIR");
-      throw new NotLeaderException(ErrorCode.SERVER_ERROR, "Locally, we do not think we are the leader.");
-    }
-
-    // we think we are the leader - get the expected shard leader version
-    // we use this version and multi to ensure *only* the current zk registered leader
-    // for a shard can put a replica into LIR
-
-    Integer leaderZkNodeParentVersion = ((ShardLeaderElectionContextBase)context).getLeaderZkNodeParentVersion();
-
-    // TODO: should we do this optimistically to avoid races?
-    if (zkClient.exists(znodePath, retryOnConnLoss)) {
-      List<Op> ops = new ArrayList<>(2);
-      ops.add(Op.check(new org.apache.hadoop.fs.Path(((ShardLeaderElectionContextBase)context).leaderPath).getParent().toString(), leaderZkNodeParentVersion));
-      ops.add(Op.setData(znodePath, znodeData, -1));
-      zkClient.multi(ops, retryOnConnLoss);
-    } else {
-      String parentZNodePath = getLeaderInitiatedRecoveryZnodePath(collection, shardId);
-      try {
-        // make sure we don't create /collections/{collection} if they do not exist with 2 param
-        zkClient.makePath(parentZNodePath, (byte[]) null, CreateMode.PERSISTENT, (Watcher) null, true, retryOnConnLoss, 2);
-      } catch (KeeperException.NodeExistsException nee) {
-        // if it exists, that's great!
-      }
-
-      // we only create the entry if the context we are using is registered as the current leader in ZK
-      List<Op> ops = new ArrayList<>(2);
-      ops.add(Op.check(new org.apache.hadoop.fs.Path(((ShardLeaderElectionContextBase)context).leaderPath).getParent().toString(), leaderZkNodeParentVersion));
-      ops.add(Op.create(znodePath, znodeData, zkClient.getZkACLProvider().getACLsToAdd(znodePath),
-          CreateMode.PERSISTENT));
-      zkClient.multi(ops, retryOnConnLoss);
-    }
-  }
-
-  @Deprecated
-  public static String getLeaderInitiatedRecoveryZnodePath(String collection, String shardId) {
-    return "/collections/" + collection + "/leader_initiated_recovery/" + shardId;
-  }
-
-  @Deprecated
-  public static String getLeaderInitiatedRecoveryZnodePath(String collection, String shardId, String coreNodeName) {
-    return getLeaderInitiatedRecoveryZnodePath(collection, shardId) + "/" + coreNodeName;
-  }
-
   public void throwErrorIfReplicaReplaced(CoreDescriptor desc) {
     ClusterState clusterState = getZkStateReader().getClusterState();
     if (clusterState != null) {
@@ -2831,15 +2498,6 @@ public class ZkController {
   }
 
   /**
-   * Thrown during leader initiated recovery process if current node is not leader
-   */
-  public static class NotLeaderException extends SolrException  {
-    public NotLeaderException(ErrorCode code, String msg) {
-      super(code, msg);
-    }
-  }
-
-  /**
    * Thrown during pre register process if the replica is not present in clusterstate
    */
   public static class NotInClusterStateException extends SolrException {
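
What replaces the deleted LIR plumbing in this file is the shard-terms flow already visible in the surviving hunks. Pieced together, the per-replica lifecycle looks roughly like the sketch below; zkController, coreDescriptor, coreContainer and coreNodeName stand for the values in scope at each of the call sites above, so this is an illustration of the flow, not code from the patch.

    void shardTermsLifecycleSketch(ZkController zkController, String collection, String shardId,
                                   String coreNodeName, CoreDescriptor coreDescriptor, CoreContainer coreContainer) {
      ZkShardTerms terms = zkController.getShardTerms(collection, shardId);

      // on registration: record this replica's term and watch for the leader raising it
      terms.registerTerm(coreNodeName);
      terms.addListener(new RecoveringCoreTermWatcher(coreDescriptor, coreContainer));

      // when publishing state: mark the start and end of recovery in the terms node
      terms.startRecovering(coreNodeName);
      terms.doneRecovering(coreNodeName);

      // before trying to lead, or before skipping the wait-for-down-state step:
      if (terms.registered(coreNodeName) && !terms.canBecomeLeader(coreNodeName)) {
        // this replica's term is behind the leader's, so it must recover first
      }
    }
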

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 93181a4..276ab94 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -1193,15 +1193,6 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
             "The shard already has an active leader. Force leader is not applicable. State: " + slice);
       }
 
-      // Clear out any LIR state
-      String lirPath = handler.coreContainer.getZkController().getLeaderInitiatedRecoveryZnodePath(collectionName, sliceId);
-      if (handler.coreContainer.getZkController().getZkClient().exists(lirPath, true)) {
-        StringBuilder sb = new StringBuilder();
-        handler.coreContainer.getZkController().getZkClient().printLayout(lirPath, 4, sb);
-        log.info("Cleaning out LIR data, which was: {}", sb);
-        handler.coreContainer.getZkController().getZkClient().clean(lirPath);
-      }
-
       final Set<String> liveNodes = clusterState.getLiveNodes();
       List<Replica> liveReplicas = slice.getReplicas().stream()
           .filter(rep -> liveNodes.contains(rep.getNodeName())).collect(Collectors.toList());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index bab8607..56bcb7a 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -207,10 +207,6 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
   private final boolean cloneRequiredOnLeader;
   private final Replica.Type replicaType;
 
-  @Deprecated
-  // this flag, used for testing rolling updates, should be removed by SOLR-11812
-  private final boolean isOldLIRMode;
-
   public DistributedUpdateProcessor(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
     this(req, rsp, new AtomicUpdateDocumentMerger(req), next);
   }
@@ -229,7 +225,6 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
 
     this.ulog = req.getCore().getUpdateHandler().getUpdateLog();
     this.vinfo = ulog == null ? null : ulog.getVersionInfo();
-    this.isOldLIRMode = !"new".equals(req.getCore().getCoreDescriptor().getCoreProperty("lirVersion", "new"));
     versionsStored = this.vinfo != null && this.vinfo.getVersionField() != null;
     returnVersions = req.getParams().getBool(UpdateParams.VERSIONS ,false);
 
@@ -381,7 +376,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
           String coreNodeName = replica.getName();
           if (skipList != null && skipListSet.contains(replica.getCoreUrl())) {
             log.info("check url:" + replica.getCoreUrl() + " against:" + skipListSet + " result:true");
-          } else if(!isOldLIRMode && zkShardTerms.registered(coreNodeName) && zkShardTerms.skipSendingUpdatesTo(coreNodeName)) {
+          } else if(zkShardTerms.registered(coreNodeName) && zkShardTerms.skipSendingUpdatesTo(coreNodeName)) {
             log.debug("skip url:{} cause its term is less than leader", replica.getCoreUrl());
             skippedCoreNodeNames.add(replica.getName());
           } else if (!clusterState.getLiveNodes().contains(replica.getNodeName()) || replica.getState() == Replica.State.DOWN) {
@@ -769,7 +764,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
  
   // TODO: optionally fail if n replicas are not reached...
   private void doFinish() {
-    boolean shouldUpdateTerms = isLeader && !isOldLIRMode && isIndexChanged;
+    boolean shouldUpdateTerms = isLeader && isIndexChanged;
     if (shouldUpdateTerms) {
       ZkShardTerms zkShardTerms = zkController.getShardTerms(cloudDesc.getCollectionName(), cloudDesc.getShardId());
       if (skippedCoreNodeNames != null) {
@@ -875,21 +870,8 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
             // if false, then the node is probably not "live" anymore
             // and we do not need to send a recovery message
             Throwable rootCause = SolrException.getRootCause(error.e);
-            if (!isOldLIRMode && zkController.getShardTerms(collection, shardId).registered(coreNodeName)) {
-              log.error("Setting up to try to start recovery on replica {} with url {} by increasing leader term", coreNodeName, replicaUrl, rootCause);
-              replicasShouldBeInLowerTerms.add(coreNodeName);
-            } else {
-              // The replica did not registered its term, so it must run with old LIR implementation
-              log.error("Setting up to try to start recovery on replica {}", replicaUrl, rootCause);
-              zkController.ensureReplicaInLeaderInitiatedRecovery(
-                  req.getCore().getCoreContainer(),
-                  collection,
-                  shardId,
-                  stdNode.getNodeProps(),
-                  req.getCore().getCoreDescriptor(),
-                  false /* forcePublishState */
-              );
-            }
+            log.error("Setting up to try to start recovery on replica {} with url {} by increasing leader term", coreNodeName, replicaUrl, rootCause);
+            replicasShouldBeInLowerTerms.add(coreNodeName);
           } catch (Exception exc) {
             Throwable setLirZnodeFailedCause = SolrException.getRootCause(exc);
             log.error("Leader failed to set replica " +
@@ -913,7 +895,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
         }
       }
     }
-    if (!isOldLIRMode && !replicasShouldBeInLowerTerms.isEmpty()) {
+    if (!replicasShouldBeInLowerTerms.isEmpty()) {
       zkController.getShardTerms(cloudDesc.getCollectionName(), cloudDesc.getShardId())
           .ensureTermsIsHigher(cloudDesc.getCoreNodeName(), replicasShouldBeInLowerTerms);
     }
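
With the LIR branch gone, the leader-side reaction to a failed forwarded update reduces to the term bump kept by this hunk: remember which replicas missed the update and raise the leader's term above theirs, so their term watcher triggers recovery. A condensed, illustrative sketch of that surviving path follows; coreNodeName stands for a replica whose update failed, and this is not a literal excerpt of doFinish().

    Set<String> replicasShouldBeInLowerTerms = new HashSet<>();

    if (zkController.getShardTerms(collection, shardId).registered(coreNodeName)) {
      // the replica participates in shard terms, so recovery is signaled through terms
      replicasShouldBeInLowerTerms.add(coreNodeName);
    }

    if (!replicasShouldBeInLowerTerms.isEmpty()) {
      // raise the leader's term above the lagging replicas'; their RecoveringCoreTermWatcher
      // notices the gap and starts recovery, replacing the old LIR znode handshake
      zkController.getShardTerms(cloudDesc.getCollectionName(), cloudDesc.getShardId())
          .ensureTermsIsHigher(cloudDesc.getCoreNodeName(), replicasShouldBeInLowerTerms);
    }

On the next request the leader also skips forwarding to replicas whose registered term is already lower (zkShardTerms.skipSendingUpdatesTo in the hunk above), so a lagging replica is not flooded with updates it would have to discard anyway.
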

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index 5fa3d56..997845f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -392,19 +392,6 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
       log.info("Timeout wait for state {}", getCollectionState(collectionName));
       throw e;
     }
-
-    TimeOut timeOut = new TimeOut(20, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    timeOut.waitFor("Time out waiting for LIR state get removed", () -> {
-      String lirPath = ZkController.getLeaderInitiatedRecoveryZnodePath(collectionName, "shard1");
-      try {
-        List<String> children = zkClient().getChildren(lirPath, null, true);
-        return children.size() == 0;
-      } catch (KeeperException.NoNodeException e) {
-        return true;
-      } catch (Exception e) {
-        throw new AssertionError(e);
-      }
-    });
   }
 }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
index 793dcb3..378bcba 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
@@ -18,10 +18,8 @@ package org.apache.solr.cloud;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.Properties;
 
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
@@ -33,8 +31,6 @@ import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Replica.State;
 import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -204,117 +200,6 @@ public class ForceLeaderTest extends HttpPartitionTest {
     }
   }
 
-  /***
-   * Tests that FORCELEADER can get an active leader after leader puts all replicas in LIR and itself goes down,
-   * hence resulting in a leaderless shard.
-   */
-  @Test
-  @Slow
-  //TODO remove in SOLR-11812
-// 12-Jun-2018   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
-  public void testReplicasInLIRNoLeader() throws Exception {
-    handle.put("maxScore", SKIPVAL);
-    handle.put("timestamp", SKIPVAL);
-
-    String testCollectionName = "forceleader_test_collection";
-    createOldLirCollection(testCollectionName, 3);
-    cloudClient.setDefaultCollection(testCollectionName);
-
-    try {
-      List<Replica> notLeaders = ensureAllReplicasAreActive(testCollectionName, SHARD1, 1, 3, maxWaitSecsToSeeAllActive);
-      assertEquals("Expected 2 replicas for collection " + testCollectionName
-          + " but found " + notLeaders.size() + "; clusterState: "
-          + printClusterStateInfo(testCollectionName), 2, notLeaders.size());
-
-      Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, SHARD1);
-      JettySolrRunner notLeader0 = getJettyOnPort(getReplicaPort(notLeaders.get(0)));
-      ZkController zkController = notLeader0.getCoreContainer().getZkController();
-
-      putNonLeadersIntoLIR(testCollectionName, SHARD1, zkController, leader, notLeaders);
-
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-      int numActiveReplicas = getNumberOfActiveReplicas(clusterState, testCollectionName, SHARD1);
-      assertEquals("Expected only 0 active replica but found " + numActiveReplicas +
-          "; clusterState: " + printClusterStateInfo(), 0, numActiveReplicas);
-
-      int numReplicasOnLiveNodes = 0;
-      for (Replica rep : clusterState.getCollection(testCollectionName).getSlice(SHARD1).getReplicas()) {
-        if (clusterState.getLiveNodes().contains(rep.getNodeName())) {
-          numReplicasOnLiveNodes++;
-        }
-      }
-      assertEquals(2, numReplicasOnLiveNodes);
-      log.info("Before forcing leader: " + printClusterStateInfo());
-      // Assert there is no leader yet
-      assertNull("Expected no leader right now. State: " + clusterState.getCollection(testCollectionName).getSlice(SHARD1),
-          clusterState.getCollection(testCollectionName).getSlice(SHARD1).getLeader());
-
-      assertSendDocFails(3);
-
-      doForceLeader(cloudClient, testCollectionName, SHARD1);
-
-      // By now we have an active leader. Wait for recoveries to begin
-      waitForRecoveriesToFinish(testCollectionName, cloudClient.getZkStateReader(), true);
-
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
-      clusterState = cloudClient.getZkStateReader().getClusterState();
-      log.info("After forcing leader: " + clusterState.getCollection(testCollectionName).getSlice(SHARD1));
-      // we have a leader
-      Replica newLeader = clusterState.getCollectionOrNull(testCollectionName).getSlice(SHARD1).getLeader();
-      assertNotNull(newLeader);
-      // leader is active
-      assertEquals(State.ACTIVE, newLeader.getState());
-
-      numActiveReplicas = getNumberOfActiveReplicas(clusterState, testCollectionName, SHARD1);
-      assertEquals(2, numActiveReplicas);
-
-      // Assert that indexing works again
-      log.info("Sending doc 4...");
-      sendDoc(4);
-      log.info("Committing...");
-      cloudClient.commit();
-      log.info("Doc 4 sent and commit issued");
-
-      assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 1);
-      assertDocsExistInAllReplicas(notLeaders, testCollectionName, 4, 4);
-
-      // Docs 1 and 4 should be here. 2 was lost during the partition, 3 had failed to be indexed.
-      log.info("Checking doc counts...");
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.add("q", "*:*");
-      assertEquals("Expected only 2 documents in the index", 2, cloudClient.query(params).getResults().getNumFound());
-
-      bringBackOldLeaderAndSendDoc(testCollectionName, leader, notLeaders, 5);
-    } finally {
-      log.info("Cleaning up after the test.");
-      // try to clean up
-      attemptCollectionDelete(cloudClient, testCollectionName);
-    }
-  }
-
-  private void createOldLirCollection(String collection, int numReplicas) throws IOException, SolrServerException {
-    if (onlyLeaderIndexes) {
-      CollectionAdminRequest
-          .createCollection(collection, "conf1", 1, 0, numReplicas, 0)
-          .setCreateNodeSet("")
-          .process(cloudClient);
-    } else {
-      CollectionAdminRequest.createCollection(collection, "conf1", 1, numReplicas)
-          .setCreateNodeSet("")
-          .process(cloudClient);
-    }
-    Properties oldLir = new Properties();
-    oldLir.setProperty("lirVersion", "old");
-    for (int i = 0; i < numReplicas; i++) {
-      // this is the only way to create replicas that run the old LIR implementation
-      CollectionAdminRequest
-          .addReplicaToShard(collection, "shard1", onlyLeaderIndexes? Replica.Type.TLOG: Replica.Type.NRT)
-          .setProperties(oldLir)
-          .process(cloudClient);
-    }
-  }
-
   private void assertSendDocFails(int docId) throws Exception {
     // sending a doc in this state fails
     expectThrows(SolrException.class,
@@ -322,68 +207,6 @@ public class ForceLeaderTest extends HttpPartitionTest {
         () -> sendDoc(docId));
   }
 
-  private void putNonLeadersIntoLIR(String collectionName, String shard, ZkController zkController, Replica leader, List<Replica> notLeaders) throws Exception {
-    SocketProxy[] nonLeaderProxies = new SocketProxy[notLeaders.size()];
-    for (int i = 0; i < notLeaders.size(); i++)
-      nonLeaderProxies[i] = getProxyForReplica(notLeaders.get(i));
-
-    sendDoc(1);
-
-    // ok, now introduce a network partition between the leader and both replicas
-    log.info("Closing proxies for the non-leader replicas...");
-    for (SocketProxy proxy : nonLeaderProxies)
-      proxy.close();
-
-    // indexing during a partition
-    log.info("Sending a doc during the network partition...");
-    sendDoc(2);
-
-    // Wait a little
-    Thread.sleep(2000);
-
-    // Kill the leader
-    log.info("Killing leader for shard1 of " + collectionName + " on node " + leader.getNodeName() + "");
-    JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));
-    getProxyForReplica(leader).close();
-    leaderJetty.stop();
-
-    // Wait for a steady state, till LIR flags have been set and the shard is leaderless
-    log.info("Sleep and periodically wake up to check for state...");
-    for (int i = 0; i < 20; i++) {
-      Thread.sleep(1000);
-      State lirStates[] = new State[notLeaders.size()];
-      for (int j = 0; j < notLeaders.size(); j++)
-        lirStates[j] = zkController.getLeaderInitiatedRecoveryState(collectionName, shard, notLeaders.get(j).getName());
-
-      ClusterState clusterState = zkController.getZkStateReader().getClusterState();
-      boolean allDown = true;
-      for (State lirState : lirStates)
-        if (Replica.State.DOWN.equals(lirState) == false)
-          allDown = false;
-      if (allDown && clusterState.getCollection(collectionName).getSlice(shard).getLeader() == null) {
-        break;
-      }
-      log.warn("Attempt " + i + ", waiting on for 1 sec to settle down in the steady state. State: " +
-          printClusterStateInfo(collectionName));
-      log.warn("LIR state: " + getLIRState(zkController, collectionName, shard));
-    }
-    log.info("Waking up...");
-
-    // remove the network partition
-    log.info("Reopening the proxies for the non-leader replicas...");
-    for (SocketProxy proxy : nonLeaderProxies)
-      proxy.reopen();
-
-    log.info("LIR state: " + getLIRState(zkController, collectionName, shard));
-
-    State lirStates[] = new State[notLeaders.size()];
-    for (int j = 0; j < notLeaders.size(); j++)
-      lirStates[j] = zkController.getLeaderInitiatedRecoveryState(collectionName, shard, notLeaders.get(j).getName());
-    for (State lirState : lirStates)
-      assertTrue("Expected that the replicas would be in LIR state by now. LIR states: "+Arrays.toString(lirStates),
-          Replica.State.DOWN == lirState || Replica.State.RECOVERING == lirState);
-  }
-
   private void bringBackOldLeaderAndSendDoc(String collection, Replica leader, List<Replica> notLeaders, int docid) throws Exception {
     // Bring back the leader which was stopped
     log.info("Bringing back originally killed leader...");
@@ -405,19 +228,6 @@ public class ForceLeaderTest extends HttpPartitionTest {
     assertDocsExistInAllReplicas(Collections.singletonList(leader), collection, docid, docid);
   }
 
-  private String getLIRState(ZkController zkController, String collection, String shard) throws KeeperException, InterruptedException {
-    StringBuilder sb = new StringBuilder();
-    String path = zkController.getLeaderInitiatedRecoveryZnodePath(collection, shard);
-    if (path == null)
-      return null;
-    try {
-      zkController.getZkClient().printLayout(path, 4, sb);
-    } catch (NoNodeException ex) {
-      return null;
-    }
-    return sb.toString();
-  }
-
   @Override
   protected int sendDoc(int docId) throws Exception {
     SolrInputDocument doc = new SolrInputDocument();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 15fdb3b..70d23aa 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -129,8 +129,6 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
   public void test() throws Exception {
     waitForThingsToLevelOut(30000);
 
-    testLeaderInitiatedRecoveryCRUD();
-
     testDoRecoveryOnRestart();
 
     // test a 1x2 collection
@@ -152,62 +150,6 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
     log.info("HttpPartitionTest succeeded ... shutting down now!");
   }
 
-  /**
-   * Tests handling of different format of lir nodes
-   */
-  //TODO remove in SOLR-11812
-  protected void testLeaderInitiatedRecoveryCRUD() throws Exception {
-    String testCollectionName = "c8n_crud_1x2";
-    String shardId = "shard1";
-    createCollectionRetry(testCollectionName, "conf1", 1, 2, 1);
-    cloudClient.setDefaultCollection(testCollectionName);
-
-    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, shardId);
-    JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));
-
-    CoreContainer cores = leaderJetty.getCoreContainer();
-    ZkController zkController = cores.getZkController();
-    assertNotNull("ZkController is null", zkController);
-
-    Replica notLeader =
-        ensureAllReplicasAreActive(testCollectionName, shardId, 1, 2, maxWaitSecsToSeeAllActive).get(0);
-
-    ZkCoreNodeProps replicaCoreNodeProps = new ZkCoreNodeProps(notLeader);
-    String replicaUrl = replicaCoreNodeProps.getCoreUrl();
-
-    MockCoreDescriptor cd = new MockCoreDescriptor() {
-      public CloudDescriptor getCloudDescriptor() {
-        return new CloudDescriptor(leader.getStr(ZkStateReader.CORE_NAME_PROP), new Properties(), this) {
-          @Override
-          public String getCoreNodeName() {
-            return leader.getName();
-          }
-          @Override
-          public boolean isLeader() {
-            return true;
-          }
-        };
-      }
-    };
-
-    zkController.updateLeaderInitiatedRecoveryState(testCollectionName, shardId, notLeader.getName(), DOWN, cd, true);
-    Map<String,Object> lirStateMap = zkController.getLeaderInitiatedRecoveryStateObject(testCollectionName, shardId, notLeader.getName());
-    assertNotNull(lirStateMap);
-    assertSame(DOWN, Replica.State.getState((String) lirStateMap.get(ZkStateReader.STATE_PROP)));
-
-    // test old non-json format handling
-    SolrZkClient zkClient = zkController.getZkClient();
-    String znodePath = zkController.getLeaderInitiatedRecoveryZnodePath(testCollectionName, shardId, notLeader.getName());
-    zkClient.setData(znodePath, "down".getBytes(StandardCharsets.UTF_8), true);
-    lirStateMap = zkController.getLeaderInitiatedRecoveryStateObject(testCollectionName, shardId, notLeader.getName());
-    assertNotNull(lirStateMap);
-    assertSame(DOWN, Replica.State.getState((String) lirStateMap.get(ZkStateReader.STATE_PROP)));
-    zkClient.delete(znodePath, -1, false);
-
-    // try to clean up
-    attemptCollectionDelete(cloudClient, testCollectionName);
-  }
-
   private void testDoRecoveryOnRestart() throws Exception {
     String testCollectionName = "collDoRecoveryOnRestart";
     try {


[42/50] [abbrv] lucene-solr:jira/http2: SOLR-12740: migration docs

Posted by da...@apache.org.
SOLR-12740: migration docs


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8d3810df
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8d3810df
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8d3810df

Branch: refs/heads/jira/http2
Commit: 8d3810df548e1edd88b7b8a68703362b590dca6a
Parents: 9c7b856
Author: Noble Paul <no...@apache.org>
Authored: Tue Oct 16 17:38:51 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Oct 16 17:38:51 2018 +1100

----------------------------------------------------------------------
 .../src/migrate-to-policy-rule.adoc             | 170 +++++++++++++++++++
 1 file changed, 170 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8d3810df/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc b/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc
new file mode 100644
index 0000000..c91e272
--- /dev/null
+++ b/solr/solr-ref-guide/src/migrate-to-policy-rule.adoc
@@ -0,0 +1,170 @@
+= Migrate to Policy Rules
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+Replica placement rules (legacy) are deprecated in favor of the new policy and preferences syntax. This document helps you migrate your existing replica placement rules to the new syntax.
+
+Every rule in the legacy framework can be expressed in the new syntax. Please refer to the <<solrcloud-autoscaling-policy-preferences.adoc,Autoscaling Policy & Preferences>> document for more details.
+
+
+The following is the legacy syntax for a rule that allows at most one replica for each shard on any Solr node:
+[source,text]
+----
+replica:<2,node:*,shard:**
+----
+The rules are specified along with a collection creation operation as the `rules` parameter.
+
+The equivalent new syntax is:
+[source,json]
+----
+{"replica":"<2","node":"#ANY","shard":"#EACH"}
+----
+
+The new policy rules have to be created separately using an API call; see <<solrcloud-autoscaling-api.adoc#Create and Modify Cluster Policies,examples>>.
+
+
+== Rule Operators
+
+All of the following operators can be used directly in the new policy syntax, and they carry the same meaning; see the example after this list.
+
+* *equals (no operator required)*: `tag:x` means tag value must be equal to ‘x’
+* *greater than (>)*: `tag:>x` means tag value greater than ‘x’. x must be a number
+* *less than (<)*: `tag:<x` means tag value less than ‘x’. x must be a number
+* *not equal (!)*: `tag:!x` means the tag value MUST NOT be equal to ‘x'. The equality check is performed on the String value
+
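+For illustration, the following hypothetical rule (the numbers are examples only) uses the "less than" operator in the new syntax to keep fewer than three replicas of each shard on any node:
+
+[source,json]
+----
+{"replica":"<3", "shard":"#EACH", "node":"#ANY"}
+----
+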
+=== Fuzzy Operator (~)
+There is no `~` operator in the new syntax. Use the attribute `"strict":false` instead.
+
+Example:
+[source,json]
+----
+{"replica":"<2","node":"#ANY","shard":"#EACH", "strict": false}
+----
+
+
+== Tag names
+
+Tag values are provided by the framework, and these tags mean the same in the new syntax as well; see the example after this list.
+
+
+* *cores*: Number of cores in the node
+* *freedisk*: Disk space available in the node
+* *host*: host name of the node
+* *port*: port of the node
+* *node*: node name
+* *role*: The role of the node. The only supported role is 'overseer'
+* *ip_1, ip_2, ip_3, ip_4*: These are IP fragments for each node. For example, in a host with IP `192.168.1.2`, `ip_1 = 2`, `ip_2 = 1`, `ip_3 = 168` and `ip_4 = 192`.
+* *sysprop.\{PROPERTY_NAME}*: These are values available from system properties. `sysprop.key` means a value that is passed to the node as `-Dkey=keyValue` during node startup. It is possible to use rules like `sysprop.key:expectedVal,shard:*`.
+
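+For example, by analogy with the host-based example later in this document, a hypothetical rule that avoids placing replicas on nodes started with `-Dzone=west` (the `zone` property is illustrative only) would be:
+
+[source,json]
+----
+{"replica":0, "sysprop.zone":"west"}
+----
+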
+=== Snitches
+There is no equivalent for a snitch in the new policy framework.
+
+== Porting existing Replica placement rules
+
+=== Keep less than 2 replicas (at most 1 replica) of this collection on any node
+
+For this rule, we define the `replica` condition with operators for "less than 2", and use a pre-defined tag named `node` to define nodes with any name.
+
+*legacy:*
+
+[source,text]
+----
+replica:<2,node:*
+----
+
+*new:*
+
+[source,json]
+----
+{"replica":"<2","node":"#ANY"}
+----
+=== For a given shard, keep less than 2 replicas on any node
+
+For this rule, we use the `shard` condition to define any shard, the `replica` condition with operators for "less than 2", and finally a pre-defined tag named `node` to define nodes with any name.
+
+*legacy:*
+
+[source,text]
+----
+shard:*,replica:<2,node:*
+----
+
+*new:*
+
+[source,json]
+----
+{"replica":"<2","shard":"#EACH", "node":"#ANY"}
+----
+=== Assign all replicas in shard1 to rack 730
+
+This rule limits the `shard` condition to 'shard1', but allows any number of replicas. We're also referencing a custom tag named `rack`. Before defining this legacy rule, we need to configure a custom snitch which provides values for the tag `rack`.
+
+*legacy:*
+
+[source,text]
+----
+shard:shard1,replica:*,rack:730
+----
+
+*new:*
+
+[source,json]
+----
+{"replica":"#ALL", "shard":"shard1", "sysprop.rack":"730"}
+----
+
+Please note that for the new rule to work, all your nodes must be started with the system property `-Drack=<rack-number>`.
+
+
+=== Create replicas in nodes with less than 5 cores only
+
+This rule uses the `replica` condition to define any number of replicas, but adds the pre-defined tag `cores` and uses operators for "less than 5".
+
+*legacy:*
+
+[source,text]
+----
+cores:<5
+----
+
+*new:*
+
+[source,json]
+----
+{"cores":"<5", "node":"#ANY"}
+----
+
+=== Do not create any replicas in host 192.45.67.3
+
+*legacy:*
+
+[source,text]
+----
+host:!192.45.67.3
+----
+
+*new:*
+
+[source,json]
+----
+{"replica": 0, "host":"192.45.67.3"}
+----
+
+
+== Defining Rules
+
+Rules are always defined along with the collection in the legacy system. The new syntax allows you to specify rules globally as well as on a <<solrcloud-autoscaling-policy-preferences.adoc#Defining Collection-Specific Policies,per-collection basis>>; see the sketch below.
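+
+As a sketch (the host, port, and policy contents are illustrative, not prescriptive), a cluster-wide policy can be set once through the Autoscaling API instead of being passed with every collection creation:
+
+[source,bash]
+----
+# Set a cluster-wide policy; the rule shown is only an example.
+curl -X POST -H 'Content-type:application/json' \
+  -d '{"set-cluster-policy": [{"replica":"<2", "shard":"#EACH", "node":"#ANY"}]}' \
+  http://localhost:8983/solr/admin/autoscaling
+----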


[09/50] [abbrv] lucene-solr:jira/http2: SOLR-12845: Disable the test until we can add a default policy equivalent to the hard coded legacy assignment rules

Posted by da...@apache.org.
SOLR-12845: Disable the test until we can add a default policy equivalent to the hard coded legacy assignment rules


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/184ed88e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/184ed88e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/184ed88e

Branch: refs/heads/jira/http2
Commit: 184ed88ecb96cbcc6545302f230f935576894124
Parents: b4d9b25
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Oct 9 13:54:46 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Tue Oct 9 13:54:46 2018 +0530

----------------------------------------------------------------------
 .../org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java     | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/184ed88e/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java b/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
index cb33e26..7c93e81 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
@@ -17,11 +17,13 @@
 
 package org.apache.solr.cloud;
 
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.Slice;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+@LuceneTestCase.AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12845")
 public class TestDeleteCollectionOnDownNodes extends SolrCloudTestCase {
 
   @BeforeClass


[39/50] [abbrv] lucene-solr:jira/http2: SOLR-12862: Fix TestLang

Posted by da...@apache.org.
SOLR-12862: Fix TestLang


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/73a413cd
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/73a413cd
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/73a413cd

Branch: refs/heads/jira/http2
Commit: 73a413cd85ca03dae69250189b9c6ae24f42801c
Parents: 6c0fbe5
Author: Joel Bernstein <jb...@apache.org>
Authored: Mon Oct 15 15:10:23 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Mon Oct 15 15:47:03 2018 -0400

----------------------------------------------------------------------
 solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/73a413cd/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
index 85ddd93..07b0938 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/TestLang.java
@@ -73,7 +73,7 @@ public class TestLang extends LuceneTestCase {
       "outliers", "stream", "getCache", "putCache", "listCache", "removeCache", "zscores", "latlonVectors",
       "convexHull", "getVertices", "getBaryCenter", "getArea", "getBoundarySize","oscillate",
       "getAmplitude", "getPhase", "getAngularFrequency", "enclosingDisk", "getCenter", "getRadius",
-      "getSupportPoints", "pairSort"};
+      "getSupportPoints", "pairSort", "log10"};
 
   @Test
   public void testLang() {


[23/50] [abbrv] lucene-solr:jira/http2: LUCENE-8496: Fix BKDWriter to use writeField1Dim when numDataDims is set to 1

Posted by da...@apache.org.
LUCENE-8496: Fix BKDWriter to use writeField1Dim when numDataDims is set to 1


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4cfa876d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4cfa876d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4cfa876d

Branch: refs/heads/jira/http2
Commit: 4cfa876d9d269ed11be8e6668d2de31690150915
Parents: 095707d
Author: Nicholas Knize <nk...@gmail.com>
Authored: Wed Oct 10 09:35:19 2018 -0500
Committer: Nicholas Knize <nk...@gmail.com>
Committed: Wed Oct 10 09:35:19 2018 -0500

----------------------------------------------------------------------
 lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4cfa876d/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
index d7db94b..c4ac04e 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
@@ -435,7 +435,7 @@ public class BKDWriter implements Closeable {
    *  disk. This method does not use transient disk in order to reorder points.
    */
   public long writeField(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
-    if (numIndexDims == 1) {
+    if (numDataDims == 1) {
       return writeField1Dim(out, fieldName, reader);
     } else {
       return writeFieldNDims(out, fieldName, reader);


[31/50] [abbrv] lucene-solr:jira/http2: SOLR-8808: Add null/empty check to SolrClient.deleteByIds

Posted by da...@apache.org.
SOLR-8808: Add null/empty check to SolrClient.deleteByIds


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6d1b2e2f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6d1b2e2f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6d1b2e2f

Branch: refs/heads/jira/http2
Commit: 6d1b2e2f38a8979c65c1a208a0dd4cfc2de951ed
Parents: 9c8ffab
Author: Jason Gerlowski <ge...@apache.org>
Authored: Sat Oct 13 21:22:01 2018 -0400
Committer: Jason Gerlowski <ge...@apache.org>
Committed: Sat Oct 13 22:15:36 2018 -0400

----------------------------------------------------------------------
 .../apache/solr/client/solrj/SolrClient.java    |  9 +-
 .../solrj/impl/CloudSolrClientBadInputTest.java | 73 +++++++++++++++
 .../ConcurrentUpdateSolrClientBadInputTest.java | 91 +++++++++++++++++++
 .../solrj/impl/HttpSolrClientBadInputTest.java  | 93 ++++++++++++++++++++
 .../impl/LBHttpSolrClientBadInputTest.java      | 89 +++++++++++++++++++
 5 files changed, 352 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6d1b2e2f/solr/solrj/src/java/org/apache/solr/client/solrj/SolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/SolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/SolrClient.java
index 4b555e1..1c79efe 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/SolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/SolrClient.java
@@ -775,7 +775,7 @@ public abstract class SolrClient implements Serializable, Closeable {
    * Deletes a list of documents by unique ID
    *
    * @param collection the Solr collection to delete the documents from
-   * @param ids  the list of document IDs to delete
+   * @param ids  the list of document IDs to delete; must be non-null and contain elements
    *
    * @return an {@link org.apache.solr.client.solrj.response.UpdateResponse} containing the response
    *         from the server
@@ -790,7 +790,7 @@ public abstract class SolrClient implements Serializable, Closeable {
   /**
    * Deletes a list of documents by unique ID
    *
-   * @param ids  the list of document IDs to delete
+   * @param ids  the list of document IDs to delete; must be non-null and contain elements
    *
    * @return an {@link org.apache.solr.client.solrj.response.UpdateResponse} containing the response
    *         from the server
@@ -806,7 +806,7 @@ public abstract class SolrClient implements Serializable, Closeable {
    * Deletes a list of documents by unique ID, specifying max time before commit
    *
    * @param collection the Solr collection to delete the documents from
-   * @param ids  the list of document IDs to delete 
+   * @param ids  the list of document IDs to delete; must be non-null and contain elements
    * @param commitWithinMs  max time (in ms) before a commit will happen
    *
    * @return an {@link org.apache.solr.client.solrj.response.UpdateResponse} containing the response
@@ -818,6 +818,9 @@ public abstract class SolrClient implements Serializable, Closeable {
    * @since 5.1
    */
   public UpdateResponse deleteById(String collection, List<String> ids, int commitWithinMs) throws SolrServerException, IOException {
+    if (ids == null) throw new IllegalArgumentException("'ids' parameter must be non-null");
+    if (ids.isEmpty()) throw new IllegalArgumentException("'ids' parameter must not be empty; should contain IDs to delete");
+
     UpdateRequest req = new UpdateRequest();
     req.deleteById(ids);
     req.setCommitWithin(commitWithinMs);
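
A minimal usage sketch of the changed method follows; the base URL, collection name, and document IDs are illustrative and not part of the patch:

    import java.util.Arrays;
    import java.util.Collections;

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;

    public class DeleteByIdSketch {
      public static void main(String[] args) throws Exception {
        // The URL and collection name below are placeholders for this sketch.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          // A non-null, non-empty list of IDs is accepted as before.
          client.deleteById("techproducts", Arrays.asList("doc1", "doc2"), 1000);

          // With this change, a null or empty list fails fast on the client side.
          try {
            client.deleteById("techproducts", Collections.emptyList(), 1000);
          } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
          }
        }
      }
    }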

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6d1b2e2f/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBadInputTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBadInputTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBadInputTest.java
new file mode 100644
index 0000000..5ec5bc6
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBadInputTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.internal.matchers.StringContains.containsString;
+
+public class CloudSolrClientBadInputTest extends SolrCloudTestCase {
+  private static final List<String> NULL_STR_LIST = null;
+  private static final List<String> EMPTY_STR_LIST = new ArrayList<>();
+  private static final String ANY_COLLECTION = "ANY_COLLECTION";
+  private static final int ANY_COMMIT_WITHIN_TIME = -1;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(1)
+        .configure();
+
+    final List<String> solrUrls = new ArrayList<>();
+  }
+
+  @Test
+  public void testDeleteByIdReportsInvalidIdLists() throws Exception {
+    try (SolrClient client = getCloudSolrClient(cluster)) {
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+    }
+  }
+
+  private void assertExceptionThrownWithMessageContaining(Class expectedType, List<String> expectedStrings, LuceneTestCase.ThrowingRunnable runnable) {
+    Throwable thrown = expectThrows(expectedType, runnable);
+
+    if (expectedStrings != null) {
+      for (String expectedString : expectedStrings) {
+        assertThat(thrown.getMessage(), containsString(expectedString));
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6d1b2e2f/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBadInputTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBadInputTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBadInputTest.java
new file mode 100644
index 0000000..f28d9c0
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientBadInputTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.SolrJettyTestBase;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.embedded.JettyConfig;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.internal.matchers.StringContains.containsString;
+
+public class ConcurrentUpdateSolrClientBadInputTest extends SolrJettyTestBase {
+  private static final List<String> NULL_STR_LIST = null;
+  private static final List<String> EMPTY_STR_LIST = new ArrayList<>();
+  private static final String ANY_COLLECTION = "ANY_COLLECTION";
+  private static final int ANY_COMMIT_WITHIN_TIME = -1;
+  private static final int ANY_QUEUE_SIZE = 1;
+  private static final int ANY_MAX_NUM_THREADS = 1;
+
+  @BeforeClass
+  public static void beforeTest() throws Exception {
+    JettyConfig jettyConfig = JettyConfig.builder()
+        .withSSLConfig(sslConfig)
+        .build();
+    createJetty(legacyExampleCollection1SolrHome(), jettyConfig);
+  }
+
+  @Test
+  public void testDeleteByIdReportsInvalidIdLists() throws Exception {
+    try (SolrClient client = getConcurrentUpdateSolrClient(jetty.getBaseUrl().toString() + "/" + ANY_COLLECTION, ANY_QUEUE_SIZE, ANY_MAX_NUM_THREADS)) {
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(NULL_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(EMPTY_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(NULL_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(EMPTY_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+    }
+
+    try (SolrClient client = getConcurrentUpdateSolrClient(jetty.getBaseUrl().toString(), ANY_QUEUE_SIZE, ANY_MAX_NUM_THREADS)) {
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+    }
+  }
+
+  private void assertExceptionThrownWithMessageContaining(Class expectedType, List<String> expectedStrings, LuceneTestCase.ThrowingRunnable runnable) {
+    Throwable thrown = expectThrows(expectedType, runnable);
+
+    if (expectedStrings != null) {
+      for (String expectedString : expectedStrings) {
+        assertThat(thrown.getMessage(), containsString(expectedString));
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6d1b2e2f/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpSolrClientBadInputTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpSolrClientBadInputTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpSolrClientBadInputTest.java
new file mode 100644
index 0000000..cf97829
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpSolrClientBadInputTest.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.solr.SolrJettyTestBase;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.embedded.JettyConfig;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.internal.matchers.StringContains.containsString;
+
+/**
+ * Tests {@link HttpSolrClient}'s response to a variety of bad inputs.
+ */
+public class HttpSolrClientBadInputTest extends SolrJettyTestBase {
+  private static final List<String> NULL_STR_LIST = null;
+  private static final List<String> EMPTY_STR_LIST = new ArrayList<>();
+  private static final String ANY_COLLECTION = "ANY_COLLECTION";
+  private static final int ANY_COMMIT_WITHIN_TIME = -1;
+
+  @BeforeClass
+  public static void beforeTest() throws Exception {
+    JettyConfig jettyConfig = JettyConfig.builder()
+        .withSSLConfig(sslConfig)
+        .build();
+    createJetty(legacyExampleCollection1SolrHome(), jettyConfig);
+  }
+
+  private void assertExceptionThrownWithMessageContaining(Class expectedType, List<String> expectedStrings, ThrowingRunnable runnable) {
+    Throwable thrown = expectThrows(expectedType, runnable);
+
+    if (expectedStrings != null) {
+      for (String expectedString : expectedStrings) {
+        assertThat(thrown.getMessage(), containsString(expectedString));
+      }
+    }
+  }
+
+  @Test
+  public void testDeleteByIdReportsInvalidIdLists() throws Exception {
+    try (SolrClient client = getHttpSolrClient(jetty.getBaseUrl().toString() + "/" + ANY_COLLECTION)) {
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(NULL_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(EMPTY_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(NULL_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(EMPTY_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+    }
+
+    try (SolrClient client = getHttpSolrClient(jetty.getBaseUrl().toString())) {
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+    }
+  }
+
+
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6d1b2e2f/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientBadInputTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientBadInputTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientBadInputTest.java
new file mode 100644
index 0000000..6c0ad81
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/LBHttpSolrClientBadInputTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.SolrJettyTestBase;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.embedded.JettyConfig;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.internal.matchers.StringContains.containsString;
+
+public class LBHttpSolrClientBadInputTest extends SolrJettyTestBase {
+  private static final List<String> NULL_STR_LIST = null;
+  private static final List<String> EMPTY_STR_LIST = new ArrayList<>();
+  private static final String ANY_COLLECTION = "ANY_COLLECTION";
+  private static final int ANY_COMMIT_WITHIN_TIME = -1;
+
+  @BeforeClass
+  public static void beforeTest() throws Exception {
+    JettyConfig jettyConfig = JettyConfig.builder()
+        .withSSLConfig(sslConfig)
+        .build();
+    createJetty(legacyExampleCollection1SolrHome(), jettyConfig);
+  }
+
+  @Test
+  public void testDeleteByIdReportsInvalidIdLists() throws Exception {
+    try (SolrClient client = getLBHttpSolrClient(jetty.getBaseUrl().toString() + "/" + ANY_COLLECTION)) {
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(NULL_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(EMPTY_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(NULL_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(EMPTY_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+    }
+
+    try (SolrClient client = getLBHttpSolrClient(jetty.getBaseUrl().toString())) {
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "null"), () -> {
+        client.deleteById(ANY_COLLECTION, NULL_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+      assertExceptionThrownWithMessageContaining(IllegalArgumentException.class, Lists.newArrayList("ids", "empty"), () -> {
+        client.deleteById(ANY_COLLECTION, EMPTY_STR_LIST, ANY_COMMIT_WITHIN_TIME);
+      });
+    }
+  }
+
+  private void assertExceptionThrownWithMessageContaining(Class expectedType, List<String> expectedStrings, LuceneTestCase.ThrowingRunnable runnable) {
+    Throwable thrown = expectThrows(expectedType, runnable);
+
+    if (expectedStrings != null) {
+      for (String expectedString : expectedStrings) {
+        assertThat(thrown.getMessage(), containsString(expectedString));
+      }
+    }
+  }
+}


[40/50] [abbrv] lucene-solr:jira/http2: SOLR-12357: Update ref guide notes

Posted by da...@apache.org.
SOLR-12357: Update ref guide notes


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/306065fc
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/306065fc
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/306065fc

Branch: refs/heads/jira/http2
Commit: 306065fca8a4b540f197aca5d53e46f2700c25bc
Parents: 73a413c
Author: David Smiley <ds...@apache.org>
Authored: Mon Oct 15 23:38:09 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Mon Oct 15 23:38:09 2018 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/time-routed-aliases.adoc | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/306065fc/solr/solr-ref-guide/src/time-routed-aliases.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/time-routed-aliases.adoc b/solr/solr-ref-guide/src/time-routed-aliases.adoc
index 2e4160e..a97d051 100644
--- a/solr/solr-ref-guide/src/time-routed-aliases.adoc
+++ b/solr/solr-ref-guide/src/time-routed-aliases.adoc
@@ -60,6 +60,8 @@ TRUP first reads TRA configuration from the alias properties when it is initiali
 Each time a new collection is added, the oldest collections in the TRA are examined for possible deletion, if that has
     been configured.
 All this happens synchronously, potentially adding seconds to the update request and indexing latency.
+If `router.preemptiveCreateMath` is configured and the document arrives within that window, the collection creation
+happens asynchronously.
 
 Any other type of update like a commit or delete is routed by TRUP to all collections.
 Generally speaking, this is not a performance concern. When Solr receives a delete or commit wherein nothing is deleted
@@ -78,12 +80,11 @@ Some _potential_ areas for improvement that _are not implemented yet_ are:
 * Ways to automatically optimize (or reduce the resources of) older collections that aren't expected to receive more
   updates, and might have less search demand.
 
-* New collections ought to be created preemptively, as an option, to avoid delaying a document that does not yet have
-  a collection to go to.
-
 * CloudSolrClient could route documents to the correct collection based on a timestamp instead always picking the
   latest.
 
+* Compatibility with CDCR.
+
 == Limitations & Assumptions
 
 * Only *time* routed aliases are supported.  If you instead have some other sequential number, you could fake it
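
As a rough usage sketch (every parameter value below is illustrative), the preemptive-creation window is configured on the alias itself, e.g. at creation time:

    /solr/admin/collections?action=CREATEALIAS&name=timedata
        &router.name=time&router.field=evt_dt
        &router.start=NOW/DAY&router.interval=%2B1DAY
        &router.preemptiveCreateMath=30MINUTES
        &create-collection.collection.configName=_default&create-collection.numShards=2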


[22/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Remove unused import

Posted by da...@apache.org.
SOLR-12739: Remove unused import


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/095707d5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/095707d5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/095707d5

Branch: refs/heads/jira/http2
Commit: 095707d54717a745245fd2702779e02d8a46e9ce
Parents: 50d1c7b
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Oct 10 18:18:29 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Oct 10 18:18:29 2018 +0530

----------------------------------------------------------------------
 .../apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/095707d5/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 5761359..ec51d55 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -78,7 +78,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.common.params.CollectionAdminParams.CLUSTER;
-import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
 import static org.apache.solr.common.params.CollectionAdminParams.DEFAULTS;
 import static org.apache.solr.common.params.CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT;
 import static org.mockito.Mockito.any;


[36/50] [abbrv] lucene-solr:jira/http2: LUCENE-8528: we can't have random outputs on input in a test with fixed assertions (test bug).

Posted by da...@apache.org.
LUCENE-8528: we can't have random outputs on input in a test with fixed assertions (test bug).


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d7fd82c0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d7fd82c0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d7fd82c0

Branch: refs/heads/jira/http2
Commit: d7fd82c0f8517251d67b0af021d259dffaa4dce6
Parents: a312c0d
Author: Dawid Weiss <dw...@apache.org>
Authored: Mon Oct 15 13:53:08 2018 +0200
Committer: Dawid Weiss <dw...@apache.org>
Committed: Mon Oct 15 13:53:08 2018 +0200

----------------------------------------------------------------------
 lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d7fd82c0/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
index a6748a3..e431074 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -157,7 +157,7 @@ public class TestFSTs extends LuceneTestCase {
         final BytesRef NO_OUTPUT = outputs.getNoOutput();      
         final List<FSTTester.InputOutput<BytesRef>> pairs = new ArrayList<>(terms2.length);
         for(int idx=0;idx<terms2.length;idx++) {
-          final BytesRef output = random().nextInt(30) == 17 ? NO_OUTPUT : new BytesRef(Integer.toString(idx));
+          final BytesRef output = idx == 17 ? NO_OUTPUT : new BytesRef(Integer.toString(idx));
           pairs.add(new FSTTester.InputOutput<>(terms2[idx], output));
         }
         FSTTester<BytesRef> tester = new FSTTester<>(random(), dir, inputMode, pairs, outputs, false);


[12/50] [abbrv] lucene-solr:jira/http2: SOLR-12843: precommit errors

Posted by da...@apache.org.
SOLR-12843: precommit errors


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a52d47a1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a52d47a1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a52d47a1

Branch: refs/heads/jira/http2
Commit: a52d47a1a5ca24bf090105388743a07ec985b399
Parents: a37a213
Author: Noble Paul <no...@apache.org>
Authored: Tue Oct 9 23:16:15 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Oct 9 23:17:09 2018 +1100

----------------------------------------------------------------------
 .../solr/client/solrj/request/MultiContentWriterRequest.java      | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a52d47a1/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java
index 1a206b8..54a1842 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/MultiContentWriterRequest.java
@@ -33,6 +33,7 @@ import org.apache.solr.common.util.JavaBinCodec;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Pair;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.solr.common.params.UpdateParams.ASSUME_CONTENT_TYPE;
 
 public class MultiContentWriterRequest extends AbstractUpdateRequest {
@@ -92,7 +93,7 @@ public class MultiContentWriterRequest extends AbstractUpdateRequest {
     byte[] bytes = null;
     if (o instanceof byte[]) bytes = (byte[]) o;
     else if (o instanceof ByteBuffer) bytes = ((ByteBuffer) o).array();
-    rdr = new InputStreamReader(new ByteArrayInputStream(bytes));
+    rdr = new InputStreamReader(new ByteArrayInputStream(bytes), UTF_8);
     String detectedContentType = null;
     for (;;) {
       int ch = rdr.read();


[43/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Clear all collections in TestCollectionStateWatchers setup so that the collections created by test methods are spread evenly in the cluster.

Posted by da...@apache.org.
SOLR-12739: Clear all collections in TestCollectionStateWatchers setup so that the collections created by test methods are spread evenly in the cluster.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/aa0a5289
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/aa0a5289
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/aa0a5289

Branch: refs/heads/jira/http2
Commit: aa0a5289e692286297762d54434ae726333a5b64
Parents: 8d3810d
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Oct 16 12:14:08 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Tue Oct 16 12:14:22 2018 +0530

----------------------------------------------------------------------
 .../org/apache/solr/common/cloud/TestCollectionStateWatchers.java   | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa0a5289/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
index 51ee814..63f7b3e 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
@@ -62,6 +62,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
 
   @Before
   public void prepareCluster() throws Exception {
+    cluster.deleteAllCollections();
     int missingServers = CLUSTER_SIZE - cluster.getJettySolrRunners().size();
     for (int i = 0; i < missingServers; i++) {
       cluster.startJettySolrRunner();


[07/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Autoscaling policy framework is now used as the default strategy to select the nodes on which new replicas or replicas of new collections are created.

Posted by da...@apache.org.
SOLR-12739: Autoscaling policy framework is now used as the default strategy to select the nodes on which new replicas or replicas of new collections are created.

Previously, the maxShardsPerNode parameter was not allowed on collections when autoscaling policy was configured. Also if an autoscaling policy was configured then the default was to set an unlimited maxShardsPerNode automatically. Now the maxShardsPerNode parameter is always allowed during collection creation and maxShardsPerNode should be set correctly (if required) regardless of whether autoscaling policies are in effect or not. The default value of maxShardsPerNode continues to be 1 as before. It can be set to -1 during collection creation to fall back to the old behavior of unlimited maxShardsPerNode when using autoscaling policy. This patch also fixes PolicyHelper to find the free disk space requirements of a new replica from the leader only if said leader node is alive.
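
For example (the collection name and sizing below are illustrative), the old unlimited behavior can still be requested explicitly at creation time:

    /solr/admin/collections?action=CREATE&name=mycoll&numShards=4&replicationFactor=2&maxShardsPerNode=-1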


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/dbed8baf
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/dbed8baf
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/dbed8baf

Branch: refs/heads/jira/http2
Commit: dbed8bafe6ee167361599deaa4f1b5fdbb0b1c32
Parents: 1118299
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Oct 9 12:10:28 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Tue Oct 9 12:10:28 2018 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                | 11 +++
 .../java/org/apache/solr/cloud/CloudUtil.java   | 37 -----------
 .../solr/cloud/api/collections/Assign.java      | 70 +++++++++++++++++---
 .../api/collections/CreateCollectionCmd.java    | 19 +-----
 .../api/collections/DeleteCollectionCmd.java    |  2 +
 .../solr/cloud/CollectionsAPISolrJTest.java     | 11 +++
 ...verseerCollectionConfigSetProcessorTest.java |  7 ++
 .../solr/cloud/api/collections/AssignTest.java  | 39 ++++++++++-
 .../autoscaling/AutoScalingHandlerTest.java     | 13 +---
 .../autoscaling/ComputePlanActionTest.java      | 10 +--
 .../autoscaling/ExecutePlanActionTest.java      |  1 +
 .../sim/SimClusterStateProvider.java            |  4 +-
 .../sim/TestSimComputePlanAction.java           |  4 +-
 solr/solr-ref-guide/src/collections-api.adoc    |  4 +-
 .../src/solrcloud-autoscaling-overview.adoc     |  2 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |  2 +
 .../solrj/cloud/autoscaling/PolicyHelper.java   |  2 +-
 .../common/params/CollectionAdminParams.java    | 10 ++-
 .../src/resources/apispec/cluster.Commands.json |  9 +++
 .../solrj/cloud/autoscaling/TestPolicy.java     | 24 +------
 20 files changed, 171 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index a760f5e..9bf6080 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -111,6 +111,15 @@ Upgrade Notes
   ZK as well as written using the V2 set-obj-property syntax but it is deprecated and will be removed in Solr 9.
   We recommend that users change their API calls to use the new format going forward.
 
+* SOLR-12739: Autoscaling policy framework is now used as the default strategy to select the nodes on which
+  new replicas or replicas of new collections are created. Previously, the maxShardsPerNode parameter was not allowed
+  on collections when autoscaling policy was configured. Also if an autoscaling policy was configured then the default
+  was to set an unlimited maxShardsPerNode automatically. Now the maxShardsPerNode parameter is always
+  allowed during collection creation and maxShardsPerNode should be set correctly (if required) regardless of whether
+  autoscaling policies are in effect or not. The default value of maxShardsPerNode continues to be 1 as before. It can
+  be set to -1 during collection creation to fall back to the old behavior of unlimited maxShardsPerNode when using
+  autoscaling policy.
+
 New Features
 ----------------------
 
@@ -179,6 +188,8 @@ Improvements
   can't be uninverted (saves mem) and can avoid wrapping the reader altogether if there's nothing to uninvert.
   IndexSchema.getUninversionMap refactored to getUninversionMapper and no longer merges FieldInfos. (David Smiley)
 
+* SOLR-12739: Make autoscaling policy based replica placement the default strategy for placing replicas. (shalin)
+
 ==================  7.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
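
As a sketch of how legacy assignment can be re-enabled (the host and port are illustrative, and the property key is assumed to match the new CollectionAdminParams constant), the cluster-level collection defaults can be written via the V2 set-obj-property syntax mentioned above:

    curl -X POST -H 'Content-type:application/json' http://localhost:8983/api/cluster \
      -d '{"set-obj-property": {"defaults": {"collection": {"useLegacyReplicaAssignment": true}}}}'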

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index 14bebef..302703b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -23,12 +23,8 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.ClusterState;
@@ -146,37 +142,4 @@ public class CloudUtil {
 
   }
 
-  /**
-   * <b>Note:</b> where possible, the {@link #usePolicyFramework(DocCollection, SolrCloudManager)} method should
-   * be used instead of this method
-   *
-   * @return true if autoscaling policy framework should be used for replica placement
-   */
-  public static boolean usePolicyFramework(SolrCloudManager cloudManager) throws IOException, InterruptedException {
-    Objects.requireNonNull(cloudManager, "The SolrCloudManager instance cannot be null");
-    return usePolicyFramework(Optional.empty(), cloudManager);
-  }
-
-  /**
-   * @return true if auto scaling policy framework should be used for replica placement
-   * for this collection, otherwise false
-   */
-  public static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
-      throws IOException, InterruptedException {
-    Objects.requireNonNull(collection, "The DocCollection instance cannot be null");
-    Objects.requireNonNull(cloudManager, "The SolrCloudManager instance cannot be null");
-    return usePolicyFramework(Optional.of(collection), cloudManager);
-  }
-
-  private static boolean usePolicyFramework(Optional<DocCollection> collection, SolrCloudManager cloudManager) throws IOException, InterruptedException {
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    // if no autoscaling configuration exists then obviously we cannot use the policy framework
-    if (autoScalingConfig.getPolicy().isEmpty()) return false;
-    // do custom preferences exist
-    if (!autoScalingConfig.getPolicy().isEmptyPreferences()) return true;
-    // does a cluster policy exist
-    if (!autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) return true;
-    // finally we check if the current collection has a policy
-    return !collection.isPresent() || collection.get().getPolicyName() != null;
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index a24ad1a..841ee93 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -29,6 +29,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Random;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -41,7 +42,6 @@ import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.CloudUtil;
 import org.apache.solr.cloud.rule.ReplicaAssigner;
 import org.apache.solr.cloud.rule.Rule;
 import org.apache.solr.common.SolrException;
@@ -52,6 +52,7 @@ import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.util.NumberUtils;
@@ -241,6 +242,56 @@ public class Assign {
     return nodeList;
   }
 
+  /**
+   * <b>Note:</b> where possible, the {@link #usePolicyFramework(DocCollection, SolrCloudManager)} method should
+   * be used instead of this method
+   *
+   * @return true if autoscaling policy framework should be used for replica placement
+   */
+  public static boolean usePolicyFramework(SolrCloudManager cloudManager) throws IOException, InterruptedException {
+    Objects.requireNonNull(cloudManager, "The SolrCloudManager instance cannot be null");
+    return usePolicyFramework(Optional.empty(), cloudManager);
+  }
+
+  /**
+   * @return true if auto scaling policy framework should be used for replica placement
+   * for this collection, otherwise false
+   */
+  public static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
+      throws IOException, InterruptedException {
+    Objects.requireNonNull(collection, "The DocCollection instance cannot be null");
+    Objects.requireNonNull(cloudManager, "The SolrCloudManager instance cannot be null");
+    return usePolicyFramework(Optional.of(collection), cloudManager);
+  }
+
+  private static boolean usePolicyFramework(Optional<DocCollection> collection, SolrCloudManager cloudManager) throws IOException, InterruptedException {
+    boolean useLegacyAssignment = false;
+    Map<String, Object> clusterProperties = cloudManager.getClusterStateProvider().getClusterProperties();
+    if (clusterProperties.containsKey(CollectionAdminParams.DEFAULTS))  {
+      Map<String, Object> defaults = (Map<String, Object>) clusterProperties.get(CollectionAdminParams.DEFAULTS);
+      Map<String, Object> collectionDefaults = (Map<String, Object>) defaults.getOrDefault(CollectionAdminParams.COLLECTION, Collections.emptyMap());
+      useLegacyAssignment = (boolean) collectionDefaults.getOrDefault(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, false);
+    }
+
+    if (!useLegacyAssignment) {
+      // if legacy assignment is not selected then autoscaling is always available through the implicit policy/preferences
+      return true;
+    }
+
+    // legacy assignment is turned on, which means we must look at the actual autoscaling config
+    // to determine whether policy framework can be used or not for this collection
+
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    // if no autoscaling configuration exists then obviously we cannot use the policy framework
+    if (autoScalingConfig.getPolicy().isEmpty()) return false;
+    // do custom preferences exist
+    if (!autoScalingConfig.getPolicy().isEmptyPreferences()) return true;
+    // does a cluster policy exist
+    if (!autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) return true;
+    // finally we check if the current collection has a policy
+    return !collection.isPresent() || collection.get().getPolicyName() != null;
+  }
+
   static class ReplicaCount {
     public final String nodeName;
     public int thisCollectionNodes = 0;
@@ -581,18 +632,17 @@ public class Assign {
       List<Map> ruleMaps = (List<Map>) collection.get("rule");
       String policyName = collection.getStr(POLICY);
       List snitches = (List) collection.get(SNITCH);
-      AutoScalingConfig autoScalingConfig = solrCloudManager.getDistribStateManager().getAutoScalingConfig();
 
-      StrategyType strategyType = null;
-      if ((ruleMaps == null || ruleMaps.isEmpty()) && !CloudUtil.usePolicyFramework(collection, solrCloudManager)) {
-        strategyType = StrategyType.LEGACY;
+      Strategy strategy = null;
+      if ((ruleMaps == null || ruleMaps.isEmpty()) && !usePolicyFramework(collection, solrCloudManager)) {
+        strategy = Strategy.LEGACY;
       } else if (ruleMaps != null && !ruleMaps.isEmpty()) {
-        strategyType = StrategyType.RULES;
+        strategy = Strategy.RULES;
       } else {
-        strategyType = StrategyType.POLICY;
+        strategy = Strategy.POLICY;
       }
 
-      switch (strategyType) {
+      switch (strategy) {
         case LEGACY:
           return new LegacyAssignStrategy();
         case RULES:
@@ -602,11 +652,11 @@ public class Assign {
         case POLICY:
           return new PolicyBasedAssignStrategy(policyName);
         default:
-          throw new Assign.AssignmentException("Unknown strategy type: " + strategyType);
+          throw new Assign.AssignmentException("Unknown strategy type: " + strategy);
       }
     }
 
-    private enum StrategyType {
+    private enum Strategy {
       LEGACY, RULES, POLICY;
     }
   }
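A minimal standalone sketch (not part of this patch) of the precedence the relocated usePolicyFramework method now implements: the defaults/collection/useLegacyReplicaAssignment cluster property is consulted first, and only an explicit opt-in to legacy assignment makes the stored autoscaling configuration decide. The class and method names below are illustrative; the nested keys mirror CollectionAdminParams.

import java.util.Collections;
import java.util.Map;

public class PlacementPrecedenceSketch {
  // Reads the nested cluster-property shape {defaults:{collection:{useLegacyReplicaAssignment:...}}}.
  // Returns true when the caller should fall back to legacy round-robin placement.
  @SuppressWarnings("unchecked")
  static boolean useLegacyAssignment(Map<String, Object> clusterProperties) {
    Map<String, Object> defaults =
        (Map<String, Object>) clusterProperties.getOrDefault("defaults", Collections.emptyMap());
    Map<String, Object> collectionDefaults =
        (Map<String, Object>) defaults.getOrDefault("collection", Collections.emptyMap());
    // Legacy assignment must be opted into; when the key is absent, the policy framework is used.
    return (boolean) collectionDefaults.getOrDefault("useLegacyReplicaAssignment", false);
  }
}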

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 212437e..533aee8 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -38,10 +38,8 @@ import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
 import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
 import org.apache.solr.client.solrj.cloud.autoscaling.NotEmptyException;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.CloudUtil;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
@@ -132,12 +130,9 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
     ocmh.validateConfigOrThrowSolrException(configName);
 
     String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
-    String policy = message.getStr(Policy.POLICY);
-    boolean usePolicyFramework = CloudUtil.usePolicyFramework(ocmh.cloudManager) || policy != null;
 
     // fail fast if parameters are wrong or incomplete
     List<String> shardNames = populateShardNames(message, router);
-    checkMaxShardsPerNode(message, usePolicyFramework);
     checkReplicaTypes(message);
 
     AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
@@ -343,10 +338,10 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
     int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
     int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
     int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
-    boolean usePolicyFramework = CloudUtil.usePolicyFramework(docCollection, cloudManager);
 
     int numSlices = shardNames.size();
-    int maxShardsPerNode = checkMaxShardsPerNode(message, usePolicyFramework);
+    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
+    if (maxShardsPerNode == -1) maxShardsPerNode = Integer.MAX_VALUE;
 
     // we need to look at every node and see how many cores it serves
     // add our new cores to existing nodes serving the least number of cores
@@ -402,16 +397,6 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
     return replicaPositions;
   }
 
-  public static int checkMaxShardsPerNode(ZkNodeProps message, boolean usePolicyFramework) {
-    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
-    if (usePolicyFramework && message.getStr(MAX_SHARDS_PER_NODE) != null && maxShardsPerNode > 0) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "'maxShardsPerNode>0' is not supported when autoScaling policies are used");
-    }
-    if (maxShardsPerNode == -1 || usePolicyFramework) maxShardsPerNode = Integer.MAX_VALUE;
-
-    return maxShardsPerNode;
-  }
-
   public static void checkReplicaTypes(ZkNodeProps message) {
     int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
     int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas > 0 ? 0 : 1));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
index 9a569d1..f1767ee 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
@@ -23,6 +23,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
@@ -180,6 +181,7 @@ public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd
   }
 
   private String referencedByAlias(String collection, Aliases aliases) {
+    Objects.requireNonNull(aliases);
     return aliases.getCollectionAliasListMap().entrySet().stream()
         .filter(e -> e.getValue().contains(collection))
         .map(Map.Entry::getKey) // alias name

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index d9826eb..4c3022c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -43,12 +43,15 @@ import org.apache.solr.common.cloud.ClusterProperties;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
 import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -56,6 +59,7 @@ import static java.util.Arrays.asList;
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_DEF;
 import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.NUM_SHARDS_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
 import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
 import static org.apache.solr.common.params.CollectionAdminParams.DEFAULTS;
 
@@ -69,6 +73,13 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
         .configure();
   }
 
+  @Before
+  public void beforeTest() throws Exception {
+    // clear any persisted auto scaling configuration
+    zkClient().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(new ZkNodeProps()), true);
+    cluster.deleteAllCollections();
+  }
+
   /**
    * When a config name is not specified during collection creation, the _default should
    * be used.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 241cfb2..e999d73 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -43,6 +43,7 @@ import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.Overseer.LeaderStatus;
 import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
+import org.apache.solr.common.cloud.Aliases;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
@@ -76,6 +77,9 @@ import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
+import static org.apache.solr.common.params.CollectionAdminParams.DEFAULTS;
+import static org.apache.solr.common.params.CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyInt;
@@ -260,6 +264,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
     when(zkStateReaderMock.getZkClient()).thenReturn(solrZkClientMock);
     when(zkStateReaderMock.getClusterState()).thenReturn(clusterStateMock);
     when(zkStateReaderMock.getAutoScalingConfig()).thenReturn(autoScalingConfig);
+    when(zkStateReaderMock.getAliases()).thenReturn(Aliases.EMPTY);
 
     when(clusterStateMock.getCollection(anyString())).thenAnswer(invocation -> {
       String key = invocation.getArgument(0);
@@ -325,6 +330,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
     when(zkControllerMock.getSolrCloudManager()).thenReturn(cloudDataProviderMock);
     when(cloudDataProviderMock.getClusterStateProvider()).thenReturn(clusterStateProviderMock);
     when(clusterStateProviderMock.getClusterState()).thenReturn(clusterStateMock);
+    when(clusterStateProviderMock.getLiveNodes()).thenReturn(liveNodes);
+    when(clusterStateProviderMock.getClusterProperties()).thenReturn(Utils.makeMap(DEFAULTS, Utils.makeMap(COLLECTION, Utils.makeMap(USE_LEGACY_REPLICA_ASSIGNMENT, true))));
     when(cloudDataProviderMock.getDistribStateManager()).thenReturn(stateManagerMock);
     when(stateManagerMock.hasData(anyString())).thenAnswer(invocation -> zkMap.containsKey(invocation.getArgument(0)));
     when(stateManagerMock.getAutoScalingConfig()).thenReturn(autoScalingConfig);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
index d2b35e4..91e37b2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
@@ -18,14 +18,21 @@ package org.apache.solr.cloud.api.collections;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
+import java.util.stream.Collectors;
 
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.cloud.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
 import org.apache.solr.cloud.ZkTestServer;
 import org.apache.solr.common.cloud.DocCollection;
@@ -34,6 +41,7 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.Utils;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.Before;
@@ -152,5 +160,34 @@ public class AssignTest extends SolrTestCaseJ4 {
       server.shutdown();
     }
   }
-  
+
+  @Test
+  public void testUsePolicyByDefault() throws Exception {
+    assumeWorkingMockito();
+
+    SolrCloudManager solrCloudManager = mock(SolrCloudManager.class);
+    ClusterStateProvider clusterStateProvider = mock(ClusterStateProvider.class);
+    when(solrCloudManager.getClusterStateProvider()).thenReturn(clusterStateProvider);
+    // first we set useLegacyReplicaAssignment=false, so autoscaling should always be used
+    when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("collection", Utils.makeMap("useLegacyReplicaAssignment", false))));
+    // verify
+    boolean usePolicyFramework = Assign.usePolicyFramework(solrCloudManager);
+    assertTrue(usePolicyFramework);
+
+    // now we set useLegacyReplicaAssignment=true, so autoscaling can only be used if an explicit policy or preference exists
+    when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("collection", Utils.makeMap("useLegacyReplicaAssignment", true))));
+    DistribStateManager distribStateManager = mock(DistribStateManager.class);
+    when(solrCloudManager.getDistribStateManager()).thenReturn(distribStateManager);
+    when(distribStateManager.getAutoScalingConfig()).thenReturn(new AutoScalingConfig(Collections.emptyMap()));
+    assertFalse(Assign.usePolicyFramework(solrCloudManager));
+
+    // let's provide a custom preference and assert that autoscaling is still used even though useLegacyReplicaAssignment=true
+    // our custom preferences are exactly the same as the default ones
+    // but because we are providing them explicitly, they must cause autoscaling to turn on
+    List<Map> customPreferences = Policy.DEFAULT_PREFERENCES
+        .stream().map(preference -> preference.getOriginal()).collect(Collectors.toList());
+
+    when(distribStateManager.getAutoScalingConfig()).thenReturn(new AutoScalingConfig(Utils.makeMap("cluster-preferences", customPreferences)));
+    assertTrue(Assign.usePolicyFramework(solrCloudManager));
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
index 5c469c9..d8219ab 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/AutoScalingHandlerTest.java
@@ -777,16 +777,6 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
     assertNotNull(violations);
     assertEquals(0, violations.size());
 
-    // assert that when a cluster policy is in effect, using maxShardsPerNode throws an exception
-    try {
-      CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6);
-      create.setMaxShardsPerNode(10);
-      create.process(solrClient);
-      fail();
-    } catch (Exception e) {
-      assertTrue(e.getMessage().contains("'maxShardsPerNode>0' is not supported when autoScaling policies are used"));
-    }
-
     // temporarily increase replica limit in cluster policy so that we can create a collection with 6 replicas
     String tempClusterPolicyCommand = "{" +
         " 'set-cluster-policy': [" +
@@ -799,7 +789,8 @@ public class AutoScalingHandlerTest extends SolrCloudTestCase {
     assertEquals(response.get("result").toString(), "success");
 
     // lets create a collection which violates the rule replicas < 2
-    CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6);
+    CollectionAdminRequest.Create create = CollectionAdminRequest.Create.createCollection("readApiTestViolations", CONFIGSET_NAME, 1, 6)
+        .setMaxShardsPerNode(3);
     CollectionAdminResponse adminResponse = create.process(solrClient);
     assertTrue(adminResponse.isSuccess());
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
index 2eaec83..087094a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
@@ -276,7 +276,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeWithMultipleReplicasLost",
         "conf",2, 3);
-//    create.setMaxShardsPerNode(2);
+    create.setMaxShardsPerNode(2);
     create.process(solrClient);
 
     waitForState("Timed out waiting for replicas of new collection to be active",
@@ -353,7 +353,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
     assertEquals(response.get("result").toString(), "success");
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeAdded",
-        "conf",1, 2);
+        "conf",1, 2).setMaxShardsPerNode(2);
     create.process(solrClient);
 
     waitForState("Timed out waiting for replicas of new collection to be active",
@@ -553,7 +553,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
 
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionNamePrefix + "_0",
-        "conf", numShards, 1);
+        "conf", numShards, 1).setMaxShardsPerNode(2);
     create.process(solrClient);
 
     waitForState("Timed out waiting for replicas of new collection to be active",
@@ -579,7 +579,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
 
     for (int i = 1; i < numCollections; i++) {
       create = CollectionAdminRequest.createCollection(collectionNamePrefix + "_" + i,
-          "conf", numShards, 2);
+          "conf", numShards, 2).setMaxShardsPerNode(numShards * 2);
       create.process(solrClient);
 
       waitForState("Timed out waiting for replicas of new collection to be active",
@@ -649,7 +649,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
     String newNodeName = newNode.getNodeName();
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionNamePrefix + "_0",
-        "conf", numShards, 2);
+        "conf", numShards, 2).setMaxShardsPerNode(numShards * 2);
     create.process(solrClient);
 
     waitForState("Timed out waiting for replicas of new collection to be active",

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/autoscaling/ExecutePlanActionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ExecutePlanActionTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ExecutePlanActionTest.java
index c876557..c15bc53 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ExecutePlanActionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ExecutePlanActionTest.java
@@ -88,6 +88,7 @@ public class ExecutePlanActionTest extends SolrCloudTestCase {
     cluster.waitForAllNodes(30);
     loader = cluster.getJettySolrRunner(0).getCoreContainer().getResourceLoader();
     cloudManager = cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getSolrCloudManager();
+    cluster.deleteAllCollections();
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index ee39666..1d377b6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -99,6 +99,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -787,7 +788,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
 
     // fail fast if parameters are wrong or incomplete
     List<String> shardNames = CreateCollectionCmd.populateShardNames(props, router);
-    CreateCollectionCmd.checkMaxShardsPerNode(props, usePolicyFramework);
+    int maxShardsPerNode = props.getInt(MAX_SHARDS_PER_NODE, 1);
+    if (maxShardsPerNode == -1) maxShardsPerNode = Integer.MAX_VALUE;
     CreateCollectionCmd.checkReplicaTypes(props);
 
     // always force getting fresh state

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
index 719bb7b..98a7728 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
@@ -205,7 +205,7 @@ public class TestSimComputePlanAction extends SimSolrCloudTestCase {
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeWithMultipleReplicasLost",
         "conf",2, 3);
-//    create.setMaxShardsPerNode(2);
+    create.setMaxShardsPerNode(2);
     create.process(solrClient);
 
     CloudTestUtils.waitForState(cluster, "Timed out waiting for replicas of new collection to be active",
@@ -283,7 +283,7 @@ public class TestSimComputePlanAction extends SimSolrCloudTestCase {
     assertEquals(response.get("result").toString(), "success");
 
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection("testNodeAdded",
-        "conf",1, 4);
+        "conf",1, 4).setMaxShardsPerNode(-1);
     create.process(solrClient);
 
     CloudTestUtils.waitForState(cluster, "Timed out waiting for replicas of new collection to be active",

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/solr-ref-guide/src/collections-api.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index f320cdb..0e4112d 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -70,7 +70,7 @@ When creating collections, the shards and/or replicas are spread across all avai
 +
 If a node is not live when the CREATE action is called, it will not get any parts of the new collection, which could lead to too many replicas being created on a single live node. Defining `maxShardsPerNode` sets a limit on the number of replicas the CREATE action will spread to each node.
 +
-If the entire collection can not be fit into the live nodes, no collection will be created at all. The default `maxShardsPerNode` value is `1`.
+If the entire collection cannot fit on the live nodes, no collection will be created at all. The default `maxShardsPerNode` value is `1`. A value of `-1` means unlimited. If a `policy` is also specified, the stricter of `maxShardsPerNode` and the policy rules applies.
 
 `createNodeSet`::
 Allows defining the nodes to spread the new collection across. The format is a comma-separated list of node_names, such as `localhost:8983_solr,localhost:8984_solr,localhost:8985_solr`.
@@ -80,7 +80,7 @@ If not provided, the CREATE operation will create shard-replicas spread across a
 Alternatively, use the special value of `EMPTY` to initially create no shard-replica within the new collection and then later use the <<addreplica,ADDREPLICA>> operation to add shard-replicas when and where required.
 
 `createNodeSet.shuffle`::
-Controls wether or not the shard-replicas created for this collection will be assigned to the nodes specified by the `createNodeSet` in a sequential manner, or if the list of nodes should be shuffled prior to creating individual replicas.
+Controls whether or not the shard-replicas created for this collection will be assigned to the nodes specified by the `createNodeSet` in a sequential manner, or if the list of nodes should be shuffled prior to creating individual replicas.
 +
 A `false` value makes the results of a collection creation predictable and gives more exact control over the location of the individual shard-replicas, but `true` can be a better choice for ensuring replicas are distributed evenly across nodes. The default is `true`.
 +
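A short SolrJ sketch of the behaviour documented above, assuming an existing SolrClient and placeholder collection/configset names; passing -1 removes the per-node cap as described.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class CreateWithUnlimitedShardsPerNode {
  public static void create(SolrClient solrClient) throws Exception {
    // maxShardsPerNode=-1 means unlimited; when a policy is also set, the stricter constraint wins.
    CollectionAdminRequest.Create create =
        CollectionAdminRequest.createCollection("myCollection", "myConfigSet", 2, 3)
            .setMaxShardsPerNode(-1);
    create.process(solrClient);
  }
}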

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/solr-ref-guide/src/solrcloud-autoscaling-overview.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/solrcloud-autoscaling-overview.adoc b/solr/solr-ref-guide/src/solrcloud-autoscaling-overview.adoc
index 518d9ee..279a396 100644
--- a/solr/solr-ref-guide/src/solrcloud-autoscaling-overview.adoc
+++ b/solr/solr-ref-guide/src/solrcloud-autoscaling-overview.adoc
@@ -53,7 +53,7 @@ Cluster preferences allow you to tell Solr how to assess system load on each nod
 
 In general, when an operation increases replica counts, the *least loaded* <<solrcloud-autoscaling-policy-preferences.adoc#node-selector,qualified node>> will be chosen, and when the operation reduces replica counts, the *most loaded* qualified node will be chosen. 
 
-The default cluster preferences are `[{minimize:cores}]`, which tells Solr to minimize the number of cores on all nodes.  In this case, the least loaded node is the one with the fewest cores.
+The default cluster preferences are `[{minimize:cores},{maximize:freedisk}]`, which tells Solr to minimize the number of cores on all nodes and, if the number of cores is equal, to maximize the free disk space available. In this case, the least loaded node is the one with the fewest cores or, if two nodes have an equal number of cores, the node with the most free disk space.
 
 You can learn more about preferences in the <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Cluster Preferences>> section.
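As a point of reference, a small sketch of how those defaults can be obtained programmatically; it uses Policy.DEFAULT_PREFERENCES, the constant touched in the Policy.java hunk below, in the same way AssignTest in this commit does, and is illustrative only.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.solr.client.solrj.cloud.autoscaling.Policy;

public class DefaultPreferencesSketch {
  // Extracts the default [{minimize:cores},{maximize:freedisk}] preferences quoted above as plain maps.
  static List<Map> defaultPreferencesAsMaps() {
    return Policy.DEFAULT_PREFERENCES.stream()
        .map(preference -> preference.getOriginal())
        .collect(Collectors.toList());
  }
}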
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
index cfe9455..db1015f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
@@ -80,6 +80,8 @@ public class Policy implements MapWriter {
   public static final Set<String> GLOBAL_ONLY_TAGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("cores", CollectionAdminParams.WITH_COLLECTION)));
   public static final List<Preference> DEFAULT_PREFERENCES = Collections.unmodifiableList(
       Arrays.asList(
+          // NOTE - if you change this, make sure to update the solrcloud-autoscaling-overview.adoc which
+          // lists the default preferences
           new Preference((Map<String, Object>) Utils.fromJSONString("{minimize : cores, precision:1}")),
           new Preference((Map<String, Object>) Utils.fromJSONString("{maximize : freedisk}"))));
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
index 0f4af70..70bc413 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/PolicyHelper.java
@@ -130,7 +130,7 @@ public class PolicyHelper {
         if (coll != null) {
           for (String shardName : shardNames) {
             Replica ldr = coll.getLeader(shardName);
-            if (ldr != null) {
+            if (ldr != null && cloudManager.getClusterStateProvider().getLiveNodes().contains(ldr.getNodeName())) {
               Map<String, Map<String, List<ReplicaInfo>>> details = cloudManager.getNodeStateProvider().getReplicaInfo(ldr.getNodeName(),
                   Collections.singleton(FREEDISK.perReplicaValue));
               ReplicaInfo replicaInfo = details.getOrDefault(collName, emptyMap()).getOrDefault(shardName, singletonList(null)).get(0);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
index cb70fb8..c34f930 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
@@ -92,7 +92,15 @@ public interface CollectionAdminParams {
   String COLOCATED_WITH = "COLOCATED_WITH";
 
   /**
-   * Used by cluster properties API to provide defaults for collection, cluster etc.
+   * Used by cluster properties API as a wrapper key to provide defaults for collection, cluster etc.
+   *
+   * e.g. {defaults:{collection:{useLegacyReplicaAssignment:false}}}
    */
   String DEFAULTS = "defaults";
+
+  /**
+   * This cluster property decides whether Solr should use the legacy round-robin replica placement strategy
+   * or the autoscaling policy based strategy to assign replicas to nodes. The default is false.
+   */
+  String USE_LEGACY_REPLICA_ASSIGNMENT = "useLegacyReplicaAssignment";
 }
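A small sketch of the nested cluster-property shape the new constant lives under, built with Utils.makeMap the same way the tests in this commit stub ClusterStateProvider.getClusterProperties(); the wrapper class is illustrative only.

import java.util.Map;

import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.util.Utils;

public class LegacyAssignmentDefaultsSketch {
  // Produces {defaults:{collection:{useLegacyReplicaAssignment:true}}}, i.e. an explicit
  // opt-in to the deprecated round-robin placement strategy.
  static Map<String, Object> legacyAssignmentOptIn() {
    return Utils.makeMap(CollectionAdminParams.DEFAULTS,
        Utils.makeMap(CollectionAdminParams.COLLECTION,
            Utils.makeMap(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, true)));
  }
}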

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/solrj/src/resources/apispec/cluster.Commands.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/resources/apispec/cluster.Commands.json b/solr/solrj/src/resources/apispec/cluster.Commands.json
index d1f5738..da31b3c 100644
--- a/solr/solrj/src/resources/apispec/cluster.Commands.json
+++ b/solr/solrj/src/resources/apispec/cluster.Commands.json
@@ -93,6 +93,15 @@
         "defaults" : {
           "type" : "object",
           "properties": {
+            "cluster": {
+              "type" : "object",
+              "properties": {
+                "useLegacyReplicaAssignment": {
+                  "type" : "boolean",
+                  "description" : "Decides whether to use the deprecated legacy replica assignment strategy or not"
+                }
+              }
+            },
             "collection": {
               "type": "object",
               "properties": {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/dbed8baf/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index ec3c56c..fecc749 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -49,7 +49,6 @@ import org.apache.solr.client.solrj.cloud.autoscaling.Suggester.Hint;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.impl.SolrClientNodeStateProvider;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.api.collections.Assign;
 import org.apache.solr.common.MapWriter;
 import org.apache.solr.common.cloud.ClusterState;
@@ -57,7 +56,6 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.cloud.rule.ImplicitSnitch;
 import org.apache.solr.common.params.CollectionParams;
@@ -81,9 +79,6 @@ import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.CLUSTER_PREF
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORES;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.REPLICA;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
 
@@ -3091,13 +3086,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "      'totaldisk': 1700," +
         "      'port': 8985" +
         "    }" +
-        "  }," +
-        "  'autoscalingJson': {" +
-        "     'cluster-preferences': [" +
-        "       { 'maximize': 'freedisk'}," +
-        "       { 'minimize': 'cores', 'precision': 3}" +
-        "     ]" +
-        "   }" +
+        "  }" +
         "}";
 
     String clusterState = "{\n" +
@@ -3161,7 +3150,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
       });
     });
-    AutoScalingConfig asc = m.containsKey("autoscalingJson") ? new AutoScalingConfig((Map<String, Object>) m.get("autoscalingJson")) : null;
+    AutoScalingConfig asc = m.containsKey("autoscalingJson") ? new AutoScalingConfig((Map<String, Object>) m.get("autoscalingJson")) : new AutoScalingConfig(Collections.emptyMap());
     DelegatingCloudManager cloudManager = new DelegatingCloudManager(null) {
 
       @Override
@@ -3207,13 +3196,6 @@ public class TestPolicy extends SolrTestCaseJ4 {
       }
     };
 
-    ZkNodeProps message = new ZkNodeProps(
-        Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-        COLLECTION_PROP, "c1",
-        SHARD_ID_PROP, "s1",
-        REPLICA_TYPE, Replica.Type.NRT.toString()
-    );
-
     Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
         .forCollection("c1")
         .forShard(Collections.singletonList("s1"))
@@ -3237,7 +3219,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
    * The reason behind doing this is to ensure that implicitly added cluster preferences do not ever
    * go to ZooKeeper so that we can decide whether to enable autoscaling policy framework or not.
    *
-   * @see org.apache.solr.cloud.CloudUtil#usePolicyFramework(DocCollection, SolrCloudManager)
+   * @see Assign#usePolicyFramework(DocCollection, SolrCloudManager)
    */
   public void testPolicyMapWriterWithEmptyPreferences() throws IOException {
     List<Map> defaultPreferences = Policy.DEFAULT_PREFERENCES


[26/50] [abbrv] lucene-solr:jira/http2: LUCENE-8526: Add javadocs in CJKBigramFilter explaining the behavior of the StandardTokenizer on Hangul syllables.

Posted by da...@apache.org.
LUCENE-8526: Add javadocs in CJKBigramFilter explaining the behavior of the StandardTokenizer on Hangul syllables.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c87778c5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c87778c5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c87778c5

Branch: refs/heads/jira/http2
Commit: c87778c50472ab81c6bfae7a5371f36a105544b3
Parents: 971a0e3
Author: Jim Ferenczi <ji...@apache.org>
Authored: Thu Oct 11 13:49:14 2018 +0100
Committer: Jim Ferenczi <ji...@apache.org>
Committed: Thu Oct 11 13:49:14 2018 +0100

----------------------------------------------------------------------
 .../java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java | 8 ++++++++
 1 file changed, 8 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c87778c5/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java
index bf4f621..7d79b84 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilter.java
@@ -43,6 +43,14 @@ import org.apache.lucene.util.ArrayUtil;
  * flag in {@link CJKBigramFilter#CJKBigramFilter(TokenStream, int, boolean)}.
  * This can be used for a combined unigram+bigram approach.
  * <p>
+ * Unlike ICUTokenizer, StandardTokenizer does not split at script boundaries.
+ * Korean Hangul characters are treated the same as many other scripts'
+ * letters, and as a result, StandardTokenizer can produce tokens that mix
+ * Hangul and non-Hangul characters, e.g. "한국abc".  Such mixed-script tokens
+ * are typed as <code>&lt;ALPHANUM&gt;</code> rather than
+ * <code>&lt;HANGUL&gt;</code>, and as a result, will not be converted to
+ * bigrams by CJKBigramFilter.
+ *
  * In all cases, all non-CJK input is passed thru unmodified.
  */
 public final class CJKBigramFilter extends TokenFilter {
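A minimal sketch of the analysis chain this javadoc describes, i.e. StandardTokenizer feeding CJKBigramFilter with its default flags; the analyzer class name is illustrative only.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.cjk.CJKBigramFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;

public class CJKBigramAnalyzerSketch extends Analyzer {
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer source = new StandardTokenizer();
    // Hangul is only bigrammed when the token is typed <HANGUL>; a mixed-script token
    // such as "한국abc" comes out of StandardTokenizer typed <ALPHANUM> and passes through.
    TokenStream sink = new CJKBigramFilter(source);
    return new TokenStreamComponents(source, sink);
  }
}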


[29/50] [abbrv] lucene-solr:jira/http2: SOLR-12565: Add SolrJ snippet to 'Using ZooKeeper to manage config' ref-guide page

Posted by da...@apache.org.
SOLR-12565: Add SolrJ snippet to 'Using ZooKeeper to manage config' ref-guide page


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/42ac07d1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/42ac07d1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/42ac07d1

Branch: refs/heads/jira/http2
Commit: 42ac07d11b9735df6dace64bf751ce528c0d01c8
Parents: d48f22c
Author: Jason Gerlowski <ge...@apache.org>
Authored: Thu Oct 11 14:21:49 2018 -0400
Committer: Jason Gerlowski <ge...@apache.org>
Committed: Thu Oct 11 14:30:43 2018 -0400

----------------------------------------------------------------------
 ...zookeeper-to-manage-configuration-files.adoc | 12 ++-
 .../ref_guide_examples/ZkConfigFilesTest.java   | 94 ++++++++++++++++++++
 2 files changed, 104 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/42ac07d1/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc b/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
index b1b3b5a..546ee99 100644
--- a/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
+++ b/solr/solr-ref-guide/src/using-zookeeper-to-manage-configuration-files.adoc
@@ -1,4 +1,6 @@
 = Using ZooKeeper to Manage Configuration Files
+:solr-root-path: ../../
+:example-source-dir: {solr-root-path}solrj/src/test/org/apache/solr/client/ref_guide_examples/
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -51,7 +53,7 @@ IMPORTANT: It's a good idea to keep these files under version control.
 
 == Uploading Configuration Files using bin/solr or SolrJ
 
-In production situations, <<config-sets.adoc#config-sets,Config Sets>> can also be uploaded to ZooKeeper independent of collection creation using either Solr's <<solr-control-script-reference.adoc#solr-control-script-reference,Solr Control Script>> or the {solr-javadocs}/solr-solrj/org/apache/solr/client/solrj/impl/CloudSolrClient.html[CloudSolrClient.uploadConfig] java method.
+In production situations, <<config-sets.adoc#config-sets,Config Sets>> can also be uploaded to ZooKeeper independent of collection creation using either Solr's <<solr-control-script-reference.adoc#solr-control-script-reference,Solr Control Script>> or SolrJ.
 
 The below command can be used to upload a new configset using the bin/solr script.
 
@@ -60,6 +62,12 @@ The below command can be used to upload a new configset using the bin/solr scrip
 bin/solr zk upconfig -n <name for configset> -d <path to directory with configset>
 ----
 
+The following code shows how this can also be achieved using SolrJ:
+[source,java,indent=0]
+----
+include::{example-source-dir}ZkConfigFilesTest.java[tag=zk-configset-upload]
+----
+
 It is strongly recommended that the configurations be kept in a version control system, Git, SVN or similar.
 
 == Managing Your SolrCloud Configuration Files
@@ -85,4 +93,4 @@ If you for example would like to keep your `solr.xml` in ZooKeeper to avoid havi
 bin/solr zk cp file:local/file/path/to/solr.xml zk:/solr.xml -z localhost:2181
 ----
 
-NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see <<setting-up-an-external-zookeeper-ensemble#updating-solr-s-include-files,instructions>>) you can omit `-z <zk host string>` from the above command.
\ No newline at end of file
+NOTE: If you have defined `ZK_HOST` in `solr.in.sh`/`solr.in.cmd` (see <<setting-up-an-external-zookeeper-ensemble#updating-solr-s-include-files,instructions>>) you can omit `-z <zk host string>` from the above command.
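For completeness, a companion sketch to the upload snippet referenced above: listing the configsets currently stored in ZooKeeper through the same ZkConfigManager API the new test below exercises. The connection string and timeout are placeholders.

import java.util.List;

import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkConfigManager;

public class ListConfigSetsSketch {
  public static List<String> listConfigSets(String zkConnectionString) throws Exception {
    try (SolrZkClient zkClient = new SolrZkClient(zkConnectionString, 10000 /* zk timeout ms */)) {
      ZkConfigManager manager = new ZkConfigManager(zkClient);
      return manager.listConfigs();
    }
  }
}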

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/42ac07d1/solr/solrj/src/test/org/apache/solr/client/ref_guide_examples/ZkConfigFilesTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/ref_guide_examples/ZkConfigFilesTest.java b/solr/solrj/src/test/org/apache/solr/client/ref_guide_examples/ZkConfigFilesTest.java
new file mode 100644
index 0000000..dcb1111
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/ref_guide_examples/ZkConfigFilesTest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.ref_guide_examples;
+
+import java.io.File;
+import java.nio.file.Paths;
+import java.util.List;
+
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.util.ExternalPaths;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Examples showing how to manipulate configsets in ZK.
+ *
+ * Snippets surrounded by "tag" and "end" comments are extracted and used in the Solr Reference Guide.
+ */
+public class ZkConfigFilesTest extends SolrCloudTestCase {
+
+  private static final int ZK_TIMEOUT_MILLIS = 10000;
+
+  @BeforeClass
+  public static void setUpCluster() throws Exception {
+    configureCluster(1)
+        .configure();
+  }
+
+  @Before
+  public void clearConfigsBefore() throws Exception {
+    clearConfigs();
+  }
+
+  @After
+  public void clearConfigsAfter() throws Exception {
+    clearConfigs();
+  }
+
+  private void clearConfigs() throws Exception {
+    ZkConfigManager manager = new ZkConfigManager(cluster.getZkClient());
+    List<String> configs = manager.listConfigs();
+    for (String config : configs) {
+      manager.deleteConfigDir(config);
+    }
+  }
+
+  @Test
+  public void testCanUploadConfigToZk() throws Exception {
+    final String zkConnectionString = cluster.getZkClient().getZkServerAddress();
+    final String localConfigSetDirectory = new File(ExternalPaths.TECHPRODUCTS_CONFIGSET).getAbsolutePath();
+
+    assertConfigsContainOnly();
+
+    // tag::zk-configset-upload[]
+    try (SolrZkClient zkClient = new SolrZkClient(zkConnectionString, ZK_TIMEOUT_MILLIS)) {
+      ZkConfigManager manager = new ZkConfigManager(zkClient);
+      manager.uploadConfigDir(Paths.get(localConfigSetDirectory), "nameForConfigset");
+    }
+    // end::zk-configset-upload[]
+
+    assertConfigsContainOnly("nameForConfigset");
+  }
+
+  private void assertConfigsContainOnly(String... expectedConfigs) throws Exception {
+    final int expectedSize = expectedConfigs.length;
+
+    ZkConfigManager manager = new ZkConfigManager(cluster.getZkClient());
+    List<String> actualConfigs = manager.listConfigs();
+
+    assertEquals(expectedSize, actualConfigs.size());
+    for (String expectedConfig : expectedConfigs) {
+      assertTrue("Expected ZK to contain " + expectedConfig + ", but it didn't.  Actual configs: " + actualConfigs, actualConfigs.contains(expectedConfig));
+    }
+  }
+}


[24/50] [abbrv] lucene-solr:jira/http2: SOLR-12620: Remove the Admin UI Cloud -> Graph (Radial) view

Posted by da...@apache.org.
SOLR-12620: Remove the Admin UI Cloud -> Graph (Radial) view


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/80011d66
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/80011d66
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/80011d66

Branch: refs/heads/jira/http2
Commit: 80011d669ad8883379535521acbcf9274473f8b9
Parents: 4cfa876
Author: Jan Høydahl <ja...@apache.org>
Authored: Wed Oct 10 21:54:34 2018 +0200
Committer: Jan Høydahl <ja...@apache.org>
Committed: Wed Oct 10 21:54:34 2018 +0200

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   2 +
 solr/solr-ref-guide/src/cloud-screens.adoc      |   8 +--
 .../src/images/cloud-screens/cloud-radial.png   | Bin 102878 -> 0 bytes
 solr/webapp/web/css/angular/menu.css            |   1 -
 solr/webapp/web/index.html                      |   1 -
 solr/webapp/web/js/angular/controllers/cloud.js |  70 +------------------
 solr/webapp/web/partials/cloud.html             |   2 +-
 7 files changed, 8 insertions(+), 76 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80011d66/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 6fac318..2db18c1 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -91,6 +91,8 @@ Other Changes
 
 * SOLR-11812: Remove backward compatibility of old LIR implementation in 8.0 (Cao Manh Dat)
 
+* SOLR-12620: Remove the Admin UI Cloud -> Graph (Radial) view (janhoy) 
+
 ==================  7.6.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80011d66/solr/solr-ref-guide/src/cloud-screens.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/cloud-screens.adoc b/solr/solr-ref-guide/src/cloud-screens.adoc
index 6e7c5a0..a61679e 100644
--- a/solr/solr-ref-guide/src/cloud-screens.adoc
+++ b/solr/solr-ref-guide/src/cloud-screens.adoc
@@ -26,7 +26,7 @@ This screen provides status information about each collection & node in your clu
 The "Cloud" menu option is only available on Solr instances running in <<getting-started-with-solrcloud.adoc#getting-started-with-solrcloud,SolrCloud mode>>. Single node or master/slave replication instances of Solr will not display this option.
 ====
 
-Click on the "Cloud" option in the left-hand navigation, and a small sub-menu appears with options called "Nodes", "Tree", "Graph" and "Graph (Radial)". The sub-view selected by default is "Graph".
+Click on the "Cloud" option in the left-hand navigation, and a small sub-menu appears with options called "Nodes", "Tree", "ZK Status" and "Graph". The sub-view selected by default is "Nodes".
 
 == Nodes View
 The "Nodes" view shows a list of the hosts and nodes in the cluster along with key information for each: "CPU", "Heap", "Disk usage", "Requests", "Collections" and "Replicas".
@@ -47,7 +47,7 @@ The "ZK Status" view gives an overview over the Zookeepers used by Solr. It list
 
 image::images/cloud-screens/cloud-zkstatus.png[image,width=512,height=509]
 
-== Graph Views
+== Graph View
 The "Graph" view shows a graph of each collection, the shards that make up those collections, and the addresses and type ("NRT", "TLOG" or "PULL") of each replica for each shard.
 
 This example shows a simple cluster. In addition to the 2 shard, 2 replica "gettingstarted" collection, there is an additional "tlog" collection consisting of mixed TLOG and PULL replica types.
@@ -57,7 +57,3 @@ image::images/cloud-screens/cloud-graph.png[image,width=512,height=250]
 Tooltips appear when hovering over each replica giving additional information.
 
 image::images/cloud-screens/cloud-hover.png[image,width=512,height=250]
-
-The "Graph (Radial)" option provides a different visual view of each node. Using the same example cluster, the radial graph view looks like:
-
-image::images/cloud-screens/cloud-radial.png[image,width=478,height=250]

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80011d66/solr/solr-ref-guide/src/images/cloud-screens/cloud-radial.png
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/images/cloud-screens/cloud-radial.png b/solr/solr-ref-guide/src/images/cloud-screens/cloud-radial.png
deleted file mode 100644
index d3b245f..0000000
Binary files a/solr/solr-ref-guide/src/images/cloud-screens/cloud-radial.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80011d66/solr/webapp/web/css/angular/menu.css
----------------------------------------------------------------------
diff --git a/solr/webapp/web/css/angular/menu.css b/solr/webapp/web/css/angular/menu.css
index f4e04c1..71a1668 100644
--- a/solr/webapp/web/css/angular/menu.css
+++ b/solr/webapp/web/css/angular/menu.css
@@ -263,7 +263,6 @@ limitations under the License.
 #menu #cloud.global .nodes a { background-image: url( ../../img/solr-ico.png ); }
 #menu #cloud.global .zkstatus a { background-image: url( ../../img/ico/node-master.png ); }
 #menu #cloud.global .graph a { background-image: url( ../../img/ico/molecule.png ); }
-#menu #cloud.global .rgraph a { background-image: url( ../../img/ico/asterisk.png ); }
 
 .sub-menu .ping.error a
 {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80011d66/solr/webapp/web/index.html
----------------------------------------------------------------------
diff --git a/solr/webapp/web/index.html b/solr/webapp/web/index.html
index 0663805..00f70ef 100644
--- a/solr/webapp/web/index.html
+++ b/solr/webapp/web/index.html
@@ -154,7 +154,6 @@ limitations under the License.
                 <li class="tree" ng-class="{active:page=='cloud-tree'}"><a href="#/~cloud?view=tree">Tree</a></li>
                 <li class="zkstatus" ng-class="{active:page=='cloud-zkstatus'}"><a href="#/~cloud?view=zkstatus">ZK Status</a></li>
                 <li class="graph" ng-class="{active:page=='cloud-graph'}"><a href="#/~cloud?view=graph">Graph</a></li>
-                <li class="rgraph" ng-class="{active:page=='cloud-rgraph'}"><a href="#/~cloud?view=rgraph">Graph (Radial)</a></li>
               </ul>
             </li>
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80011d66/solr/webapp/web/js/angular/controllers/cloud.js
----------------------------------------------------------------------
diff --git a/solr/webapp/web/js/angular/controllers/cloud.js b/solr/webapp/web/js/angular/controllers/cloud.js
index 59c4825..119f00c 100644
--- a/solr/webapp/web/js/angular/controllers/cloud.js
+++ b/solr/webapp/web/js/angular/controllers/cloud.js
@@ -32,9 +32,6 @@ solrAdminApp.controller('CloudController',
         if (view === "tree") {
             $scope.resetMenu("cloud-tree", Constants.IS_ROOT_PAGE);
             treeSubController($scope, Zookeeper);
-        } else if (view === "rgraph") {
-            $scope.resetMenu("cloud-rgraph", Constants.IS_ROOT_PAGE);
-            graphSubController($scope, Zookeeper, true);
         } else if (view === "graph") {
             $scope.resetMenu("cloud-graph", Constants.IS_ROOT_PAGE);
             graphSubController($scope, Zookeeper, false);
@@ -584,7 +581,7 @@ function secondsForHumans ( seconds ) {
     return returntext.trim() === '' ? '0m' : returntext.trim();
 }
 
-var graphSubController = function ($scope, Zookeeper, isRadial) {
+var graphSubController = function ($scope, Zookeeper) {
     $scope.showZkStatus = false;
     $scope.showTree = false;
     $scope.showGraph = true;
@@ -740,7 +737,7 @@ var graphSubController = function ($scope, Zookeeper, isRadial) {
                     $scope.helperData.state = $.unique($scope.helperData.state);
                     $scope.helperData.core_node = $.unique($scope.helperData.core_node);
 
-                    if (!isRadial && data.znode && data.znode.paging) {
+                    if (data.znode && data.znode.paging) {
                         $scope.showPaging = true;
 
                         var parr = data.znode.paging.split('|');
@@ -774,7 +771,6 @@ var graphSubController = function ($scope, Zookeeper, isRadial) {
                     }
                     $scope.graphData = graph_data;
                     $scope.leafCount = leaf_count;
-                    $scope.isRadial = isRadial;
                 });
         });
     };
@@ -790,7 +786,6 @@ solrAdminApp.directive('graph', function(Constants) {
             data: "=",
             leafCount: "=",
             helperData: "=",
-            isRadial: "="
         },
         link: function (scope, element, attrs) {
             var helper_path_class = function (p) {
@@ -873,11 +868,7 @@ solrAdminApp.directive('graph', function(Constants) {
 
             scope.$watch("data", function(newValue, oldValue) {
                 if (newValue) {
-                    if (scope.isRadial) {
-                        radialGraph(element, scope.data, scope.leafCount);
-                    } else {
-                        flatGraph(element, scope.data, scope.leafCount);
-                    }
+                    flatGraph(element, scope.data, scope.leafCount);
                 }
 
                 $('text').tooltip({
@@ -964,61 +955,6 @@ solrAdminApp.directive('graph', function(Constants) {
 
                 setNodeNavigationBehavior(node);
             };
-
-            var radialGraph = function(element, graphData, leafCount) {
-                var max_val = Math.min(element.width(), $('body').height())
-                var r = max_val / 2;
-
-                var cluster = d3.layout.cluster()
-                    .size([360, r - 160]);
-
-                var diagonal = d3.svg.diagonal.radial()
-                    .projection(function (d) {
-                        return [d.y, d.x / 180 * Math.PI];
-                    });
-
-                d3.select('#canvas', element).html('');
-                var vis = d3.select('#canvas').append('svg')
-                    .attr('width', r * 2)
-                    .attr('height', r * 2)
-                    .append('g')
-                    .attr('transform', 'translate(' + r + ',' + r + ')');
-
-                var nodes = cluster.nodes(graphData);
-
-                var link = vis.selectAll('path.link')
-                    .data(cluster.links(nodes))
-                    .enter().append('path')
-                    .attr('class', helper_path_class)
-                    .attr('d', diagonal);
-
-                var node = vis.selectAll('g.node')
-                    .data(nodes)
-                    .enter().append('g')
-                    .attr('class', helper_node_class)
-                    .attr('transform', function (d) {
-                        return 'rotate(' + (d.x - 90) + ')translate(' + d.y + ')';
-                    })
-
-                node.append('circle')
-                    .attr('r', 4.5);
-
-                node.append('text')
-                    .attr('dx', function (d) {
-                        return d.x < 180 ? 8 : -8;
-                    })
-                    .attr('dy', '.31em')
-                    .attr('text-anchor', function (d) {
-                        return d.x < 180 ? 'start' : 'end';
-                    })
-                    .attr('transform', function (d) {
-                        return d.x < 180 ? null : 'rotate(180)';
-                    })
-                    .attr("title", helper_tooltip_text)
-                    .text(helper_node_text);
-
-                setNodeNavigationBehavior(node, "rgraph");
-            }
         }
     };
 });

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/80011d66/solr/webapp/web/partials/cloud.html
----------------------------------------------------------------------
diff --git a/solr/webapp/web/partials/cloud.html b/solr/webapp/web/partials/cloud.html
index 825e195..59e9a40 100644
--- a/solr/webapp/web/partials/cloud.html
+++ b/solr/webapp/web/partials/cloud.html
@@ -239,7 +239,7 @@ limitations under the License.
       </table>
     </div>
     
-    <div graph data="graphData" leaf-count="leafCount" helper-data="helperData" is-radial="isRadial" id="graph-content" class="content clearfix" ng-show="showGraph">
+    <div graph data="graphData" leaf-count="leafCount" helper-data="helperData" id="graph-content" class="content clearfix" ng-show="showGraph">
 
       <div id="canvas"></div>
 


[41/50] [abbrv] lucene-solr:jira/http2: SOLR-12806: use autoscaling policies with strict=false to prioritize node allocation

Posted by da...@apache.org.
SOLR-12806: use autoscaling policies with strict=false to prioritize node allocation
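
As a rough sketch of the kind of policy this enables (mirroring the new testFreeDiskDeviation.json test resource added in this commit), a cluster policy can combine a hard per-shard rule with a non-strict free-disk preference:

    "cluster-policy": [
      {"replica": "<2", "shard": "#EACH", "node": "#ANY"},
      {"replica": "#ALL", "freedisk": ">700", "strict": false}
    ]

With strict=false the second clause is no longer simply tolerated when violated: the suggesters now compute how far each candidate node deviates from the freedisk target and, among otherwise equal candidates, prefer the node that deviates least.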


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9c7b8564
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9c7b8564
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9c7b8564

Branch: refs/heads/jira/http2
Commit: 9c7b8564d8362afa33989d5f7d615868b408a1e6
Parents: 306065f
Author: Noble Paul <no...@apache.org>
Authored: Tue Oct 16 16:56:51 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Oct 16 16:56:51 2018 +1100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../cloud/autoscaling/AddReplicaSuggester.java  |  7 +---
 .../client/solrj/cloud/autoscaling/Clause.java  | 31 +++++++++-------
 .../cloud/autoscaling/FreeDiskVariable.java     | 22 +++++++++--
 .../cloud/autoscaling/MoveReplicaSuggester.java |  4 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |  5 ++-
 .../solrj/cloud/autoscaling/Suggester.java      | 39 +++++++++++++++-----
 .../solrj/cloud/autoscaling/Variable.java       | 20 ++++++++++
 .../testCreateCollectionWithEmptyPolicy.json    | 20 ++++++++++
 .../solr/autoscaling/testFreeDiskDeviation.json | 35 ++++++++++++++++++
 .../solrj/cloud/autoscaling/TestPolicy.java     | 33 +++++++++++++++++
 .../solrj/cloud/autoscaling/TestPolicy2.java    | 19 ++++++++++
 12 files changed, 202 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3a58a68..b79ef5a 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -203,6 +203,8 @@ Improvements
 
 * SOLR-12739: Make autoscaling policy based replica placement the default strategy for placing replicas. (shalin)
 
+* SOLR-12806: use autoscaling policies with strict=false to prioritize node allocation (noble)
+
 ==================  7.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AddReplicaSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AddReplicaSuggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AddReplicaSuggester.java
index a498932..2bb214c 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AddReplicaSuggester.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/AddReplicaSuggester.java
@@ -47,19 +47,16 @@ class AddReplicaSuggester extends Suggester {
       //iterate through  nodes and identify the least loaded
       List<Violation> leastSeriousViolation = null;
       Row bestNode = null;
-      double[] bestDeviation = null;
       for (int i = getMatrix().size() - 1; i >= 0; i--) {
         Row row = getMatrix().get(i);
         if (!isNodeSuitableForReplicaAddition(row)) continue;
         Row tmpRow = row.addReplica(shard.first(), shard.second(), type, strict);
-        double[] deviation = new double[1];
-        List<Violation> errs = testChangedMatrix(strict, tmpRow.session, deviation);
+        List<Violation> errs = testChangedMatrix(strict, tmpRow.session);
         if (!containsNewErrors(errs)) {
-          if ((errs.isEmpty() && isLessDeviant(bestDeviation, deviation)) ||//there are no violations but this is deviating less
+          if ((errs.isEmpty() && isLessDeviant()) ||//there are no violations but this is deviating less
               isLessSerious(errs, leastSeriousViolation)) {//there are errors , but this has less serious violation
             leastSeriousViolation = errs;
             bestNode = tmpRow;
-            bestDeviation = deviation;
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
index 820f335..2138ee8 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Clause.java
@@ -60,6 +60,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
 
   final boolean hasComputedValue;
   final Map<String, Object> original;
+  final Clause derivedFrom;
   Condition collection, shard, replica, tag, globalTag;
   final Replica.Type type;
   boolean strict;
@@ -74,6 +75,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
     this.globalTag = evaluateValue(clause.globalTag, computedValueEvaluator);
     this.hasComputedValue = clause.hasComputedValue;
     this.strict = clause.strict;
+    derivedFrom = clause.derivedFrom;
   }
 
   // internal use only
@@ -85,9 +87,11 @@ public class Clause implements MapWriter, Comparable<Clause> {
     this.type = null;
     this.hasComputedValue = false;
     this.strict = isStrict;
+    derivedFrom = null;
   }
 
   private Clause(Map<String, Object> m) {
+    derivedFrom = (Clause) m.remove(Clause.class.getName());
     this.original = Utils.getDeepCopy(m, 10);
     String type = (String) m.get("type");
     this.type = type == null || ANY.equals(type) ? null : Replica.Type.valueOf(type.toUpperCase(Locale.ROOT));
@@ -371,10 +375,10 @@ public class Clause implements MapWriter, Comparable<Clause> {
       Condition tag = this.tag;
       if (tag.computedType != null) tag = evaluateValue(tag, eval);
       Object val = row.getVal(tag.name);
-      if (val != null && tag.isPass(val)) {
+      if (val != null) {
         if (tag.op == LESS_THAN || tag.op == GREATER_THAN) {
           tags.add(this.tag);
-        } else {
+        } else if (tag.isPass(val)) {
           tags.add(val);
         }
       }
@@ -413,23 +417,17 @@ public class Clause implements MapWriter, Comparable<Clause> {
               t);
           ctx.resetAndAddViolation(t, replicaCountCopy, violation);
           sealedClause.addViolatingReplicas(sealedClause.tag, eval, ctx, tag.name, t, violation, session);
+          if (!this.strict && deviations != null) {
+            tag.varType.computeDeviation(session, deviations, replicaCount, sealedClause);
+          }
         } else {
-          computeDeviation(deviations, replicaCount, sealedClause);
+          if (replica.op == RANGE_EQUAL) tag.varType.computeDeviation(session, deviations, replicaCount, sealedClause);
         }
       }
     }
     return ctx.allViolations;
   }
 
-  private void computeDeviation(double[] deviations, ReplicaCount replicaCount, SealedClause sealedClause) {
-    if (deviations != null && sealedClause.replica.op == RANGE_EQUAL) {
-      Number actualCount = replicaCount.getVal(type);
-      Double realDelta = ((RangeVal) sealedClause.replica.val).realDelta(actualCount.doubleValue());
-      realDelta = this.isReplicaZero() ? -1 * realDelta : realDelta;
-      deviations[0] += Math.abs(realDelta);
-    }
-  }
-
   void addViolatingReplicas(Condition tag,
                             ComputedValueEvaluator eval,
                             Violation.Ctx ctx, String tagName, Object tagVal,
@@ -485,8 +483,11 @@ public class Clause implements MapWriter, Comparable<Clause> {
               eval.node);
           ctx.resetAndAddViolation(row.node, replicaCountCopy, violation);
           sealedClause.addViolatingReplicas(sealedClause.tag, eval, ctx, NODE, row.node, violation, session);
+          if (!this.strict && deviations != null) {
+            tag.varType.computeDeviation(session, deviations, replicaCount, sealedClause);
+          }
         } else {
-          computeDeviation(deviations, replicaCount, sealedClause);
+          if (replica.op == RANGE_EQUAL) tag.varType.computeDeviation(session, deviations, replicaCount, sealedClause);
         }
       }
     }
@@ -627,6 +628,10 @@ public class Clause implements MapWriter, Comparable<Clause> {
     throw new RuntimeException(name + ": " + val + "not a valid number");
   }
 
+  @Override
+  public int hashCode() {
+    return original.hashCode();
+  }
 
   public static Double parseDouble(String name, Object val) {
     if (val == null) return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
index 778a837..2c2ff18 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/FreeDiskVariable.java
@@ -26,12 +26,13 @@ import java.util.function.Consumer;
 import java.util.stream.Collectors;
 
 import org.apache.solr.client.solrj.cloud.autoscaling.Suggester.Hint;
-import org.apache.solr.common.cloud.rule.ImplicitSnitch;
 import org.apache.solr.common.util.Pair;
 
 import static org.apache.solr.client.solrj.cloud.autoscaling.Suggestion.suggestNegativeViolations;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORE_IDX;
+import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.TOTALDISK;
+import static org.apache.solr.common.cloud.rule.ImplicitSnitch.DISK;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
 
 public class FreeDiskVariable extends VariableBase {
@@ -42,7 +43,7 @@ public class FreeDiskVariable extends VariableBase {
 
   @Override
   public Object convertVal(Object val) {
-    Number value = (Number) super.validate(ImplicitSnitch.DISK, val, false);
+    Number value = (Number) super.validate(FREEDISK.tagName, val, false);
     if (value != null) {
       value = value.doubleValue() / 1024.0d / 1024.0d / 1024.0d;
     }
@@ -70,6 +71,19 @@ public class FreeDiskVariable extends VariableBase {
   }
 
   @Override
+  public void computeDeviation(Policy.Session session, double[] deviation, ReplicaCount replicaCount,
+                               SealedClause sealedClause) {
+    if (deviation == null) return;
+    for (Row node : session.matrix) {
+      Object val = node.getVal(sealedClause.tag.name);
+      Double delta = sealedClause.tag.delta(val);
+      if (delta != null) {
+        deviation[0] += Math.abs(delta);
+      }
+    }
+  }
+
+  @Override
   public void getSuggestions(Suggestion.Ctx ctx) {
     if (ctx.violation == null) return;
     if (ctx.violation.replicaCountDelta > 0) {
@@ -77,7 +91,7 @@ public class FreeDiskVariable extends VariableBase {
           row -> ctx.violation.getViolatingReplicas()
               .stream()
               .anyMatch(p -> row.node.equals(p.replicaInfo.getNode())))
-          .sorted(Comparator.comparing(r -> ((Double) r.getVal(ImplicitSnitch.DISK, 0d))))
+          .sorted(Comparator.comparing(r -> ((Double) r.getVal(DISK, 0d))))
           .collect(Collectors.toList());
 
 
@@ -91,7 +105,7 @@ public class FreeDiskVariable extends VariableBase {
           if (s1 != null && s2 != null) return s1.compareTo(s2);
           return 0;
         });
-        double currentDelta = ctx.violation.getClause().tag.delta(node.getVal(ImplicitSnitch.DISK));
+        double currentDelta = ctx.violation.getClause().tag.delta(node.getVal(DISK));
         for (ReplicaInfo replica : replicas) {
           if (currentDelta < 1) break;
           if (replica.getVariables().get(CORE_IDX.tagName) == null) continue;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
index 9e778c0..ba59eec 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/MoveReplicaSuggester.java
@@ -49,6 +49,7 @@ public class MoveReplicaSuggester extends Suggester {
     List<Pair<ReplicaInfo, Row>> validReplicas = getValidReplicas(true, true, -1);
     validReplicas.sort(leaderLast);
     for (int i1 = 0; i1 < validReplicas.size(); i1++) {
+      lastBestDeviation = null;
       Pair<ReplicaInfo, Row> fromReplica = validReplicas.get(i1);
       Row fromRow = fromReplica.second();
       ReplicaInfo ri = fromReplica.first();
@@ -62,8 +63,7 @@ public class MoveReplicaSuggester extends Suggester {
         if (!isNodeSuitableForReplicaAddition(targetRow)) continue;
         targetRow = targetRow.addReplica(ri.getCollection(), ri.getShard(), ri.getType(), strict); // add replica to target first
         Row srcRowModified = targetRow.session.getNode(fromRow.node).removeReplica(ri.getCollection(), ri.getShard(), ri.getType());//then remove replica from source node
-        double[] deviation = new double[1];
-        List<Violation> errs = testChangedMatrix(strict, srcRowModified.session, deviation);
+        List<Violation> errs = testChangedMatrix(strict, srcRowModified.session);
         srcRowModified.session.applyRules(); // now resort the nodes with the new values
         Policy.Session tmpSession = srcRowModified.session;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
index db1015f..1642ca6 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
@@ -338,7 +338,10 @@ public class Policy implements MapWriter {
         .filter(Clause::isPerCollectiontag)
         .map(clause -> {
           Map<String, Object> copy = new LinkedHashMap<>(clause.original);
-          if (!copy.containsKey("collection")) copy.put("collection", coll);
+          if (!copy.containsKey("collection")) {
+            copy.put("collection", coll);
+            copy.put(Clause.class.getName(), clause);
+          }
           return Clause.create(copy);
         })
         .filter(it -> (it.getCollection().isPass(coll)))

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
index 0071978..bba5906 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Suggester.java
@@ -25,6 +25,7 @@ import java.util.Collections;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -66,14 +67,27 @@ public abstract class Suggester implements MapWriter {
   boolean force;
   protected List<Violation> originalViolations = new ArrayList<>();
   private boolean isInitialized = false;
+  LinkedHashMap<Clause, double[]> deviations, lastBestDeviation;
+
 
   void _init(Policy.Session session) {
     this.session = session.copy();
   }
 
-  boolean isLessDeviant(double[] previousBest, double[] newDeviation) {
-    if (previousBest == null) return true;
-    return newDeviation[0] < previousBest[0];
+  boolean isLessDeviant() {
+    if (lastBestDeviation == null && deviations == null) return false;
+    if (deviations == null) return true;
+    if (lastBestDeviation == null) return false;
+    if (lastBestDeviation.size() < deviations.size()) return true;
+    for (Map.Entry<Clause, double[]> currentDeviation : deviations.entrySet()) {
+      double[] lastDeviation = lastBestDeviation.get(currentDeviation.getKey());
+      if (lastDeviation == null) return false;
+      int result = Preference.compareWithTolerance(currentDeviation.getValue()[0],
+          lastDeviation[0], 1);
+      if (result < 0) return true;
+      if (result > 0) return false;
+    }
+    return false;
   }
   public Suggester hint(Hint hint, Object value) {
     hint.validator.accept(value);
@@ -282,17 +296,22 @@ public abstract class Suggester implements MapWriter {
     }
   }
 
-  List<Violation> testChangedMatrix(boolean strict, Policy.Session session, double[] deviation) {
+  List<Violation> testChangedMatrix(boolean executeInStrictMode, Policy.Session session) {
+    if (this.deviations != null) this.lastBestDeviation = this.deviations;
+    this.deviations = null;
     Policy.setApproxValuesAndSortNodes(session.getPolicy().clusterPreferences, session.matrix);
     List<Violation> errors = new ArrayList<>();
     for (Clause clause : session.expandedClauses) {
-      if (strict || clause.strict) {
-        List<Violation> errs = clause.test(session, deviation);
-        if (!errs.isEmpty()) {
-          errors.addAll(errs);
-        }
-      }
+      Clause originalClause = clause.derivedFrom == null ? clause : clause.derivedFrom;
+//      if (!executeInStrictMode && !clause.strict) {
+      if (this.deviations == null) this.deviations = new LinkedHashMap<>();
+      this.deviations.put(originalClause, new double[1]);
+//      }
+      List<Violation> errs = clause.test(session, this.deviations == null ? null : this.deviations.get(originalClause));
+      if (!errs.isEmpty() &&
+          (executeInStrictMode || clause.strict)) errors.addAll(errs);
     }
+    if (!errors.isEmpty()) deviations = null;
     return errors;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
index 870483a..0dfe282 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Variable.java
@@ -59,10 +59,24 @@ public interface Variable {
 
   void getSuggestions(Suggestion.Ctx ctx);
 
+  /**When a non constant value is used in a variable, the actual value needs to be computed at the runtime
+   *
+   */
   default Object computeValue(Policy.Session session, Condition condition, String collection, String shard, String node) {
     return condition.val;
   }
 
+  default void computeDeviation(Policy.Session session, double[] deviations, ReplicaCount replicaCount, SealedClause sealedClause) {
+    if (deviations != null) {
+      Number actualCount = replicaCount.getVal(sealedClause.type);
+      if(sealedClause.replica.val instanceof RangeVal) {
+        Double realDelta = ((RangeVal) sealedClause.replica.val).realDelta(actualCount.doubleValue());
+        realDelta = sealedClause.isReplicaZero() ? -1 * realDelta : realDelta;
+        deviations[0] += Math.abs(realDelta);
+      }
+    }
+  }
+
   int compareViolation(Violation v1, Violation v2);
 
   default void projectRemoveReplica(Cell cell, ReplicaInfo ri, Consumer<Row.OperationInfo> opCollector) {
@@ -328,6 +342,12 @@ public interface Variable {
     }
 
     @Override
+    public void computeDeviation(Policy.Session session, double[] deviations, ReplicaCount replicaCount, SealedClause sealedClause) {
+      impl.computeDeviation(session, deviations, replicaCount, sealedClause);
+    }
+
+
+    @Override
     public boolean match(Object inputVal, Operand op, Object val, String name, Row row) {
       return impl.match(inputVal, op, val, name, row);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/test-files/solrj/solr/autoscaling/testCreateCollectionWithEmptyPolicy.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testCreateCollectionWithEmptyPolicy.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testCreateCollectionWithEmptyPolicy.json
new file mode 100644
index 0000000..1d9a2b4
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testCreateCollectionWithEmptyPolicy.json
@@ -0,0 +1,20 @@
+{
+  "diagnostics":{
+    "sortedNodes":[{
+      "node":"127.0.0.1:49469_solr",
+      "isLive":true,
+      "cores":0.0,
+      "freedisk":672.6827087402344,
+      "totaldisk":1037.938980102539,
+      "replicas":{}}
+    ,{
+        "node":"127.0.0.1:49470_solr",
+        "isLive":true,
+        "cores":0.0,
+        "freedisk":672.6827087402344,
+        "totaldisk":1037.938980102539,
+        "replicas":{}}],
+    "liveNodes":["127.0.0.1:49469_solr",
+      "127.0.0.1:49470_solr"],
+    "violations":[],
+    "config":{}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskDeviation.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskDeviation.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskDeviation.json
new file mode 100644
index 0000000..10e3670
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskDeviation.json
@@ -0,0 +1,35 @@
+{
+  "liveNodes": [
+    "node1",
+    "node2",
+    "node3"
+  ],
+  "replicaInfo": {
+    "node1": {
+      "mycoll1": {
+        "shard3": [{"r3": {"type": "NRT", "INDEX.sizeInGB": 700}}],
+        "shard4": [{"r4": {"type": "NRT", "INDEX.sizeInGB": 400}}]
+      }
+    },
+    "node2": {
+      "mycoll1": {
+        "shard1": [{"r1": {"type": "NRT", "INDEX.sizeInGB": 450}}],
+        "shard2": [{"r2": {"type": "NRT", "INDEX.sizeInGB": 750}}]
+      }
+    },
+    "node3": {
+      "mycoll2": {
+        "shard1": [{"r1": {"type": "NRT", "INDEX.sizeInGB": 250}}]
+      }
+    }
+  },
+  "nodeValues": {
+    "node1": {"node": "node1", "cores": 2, "freedisk": 900},
+    "node2": {"node": "node2", "cores": 2, "freedisk": 800},
+    "node3": {"node": "node3", "cores": 1, "freedisk": 1200}
+  },
+  "config": {
+    "cluster-policy": [{"replica":"<2", "shard":"#EACH", "node":"#ANY"},
+      {"replica": "#ALL", "freedisk": ">700", "strict": false}]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index fecc749..fc0806b 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -41,6 +41,7 @@ import com.google.common.collect.ImmutableSet;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.client.solrj.V2RequestSupport;
 import org.apache.solr.client.solrj.cloud.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
 import org.apache.solr.client.solrj.cloud.NodeStateProvider;
@@ -2447,6 +2448,38 @@ public class TestPolicy extends SolrTestCaseJ4 {
     assertEquals(4, count.get());
 
   }
+  public void testFreeDiskDeviation() throws IOException {
+    Map map = (Map) TestPolicy2.loadFromResource("testFreeDiskDeviation.json");
+    AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) map.get("config"));
+    SolrCloudManager scm = cloudManagerWithData(map);
+    Suggester suggester = cfg.getPolicy()
+        .createSession(scm)
+        .getSuggester(ADDREPLICA);
+
+    MapWriter v2Request = (MapWriter) ((V2RequestSupport) suggester
+        .hint(Hint.COLL_SHARD, new Pair<>("mycoll2", "shard1"))
+        .getSuggestion()
+        .setUseV2(true))
+        .getV2Request();
+    assertEquals("/c/mycoll2/shards", v2Request._get("path",null));
+    assertEquals("add-replica", v2Request._get("command[0]/key",null));
+    assertEquals("node1", v2Request._get("command/add-replica/node",null));
+
+
+    suggester = suggester.getSession()
+        .getSuggester(ADDREPLICA);
+    v2Request = (MapWriter) ((V2RequestSupport) suggester
+        .hint(Hint.COLL_SHARD, new Pair<>("mycoll2", "shard1"))
+        .getSuggestion()
+        .setUseV2(true))
+        .getV2Request();
+
+    assertEquals("/c/mycoll2/shards", v2Request._get("path",null));
+    assertEquals("add-replica", v2Request._get("command[0]/key",null));
+    assertEquals("node2", v2Request._get("command/add-replica/node",null));
+
+
+  }
 
 
   public void testFreeDiskSuggestions() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c7b8564/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
index 391b210..afc5540 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
@@ -32,6 +32,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
 
 import com.google.common.collect.ImmutableSet;
 import org.apache.solr.SolrTestCaseJ4;
@@ -40,10 +41,12 @@ import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.impl.SolrClientNodeStateProvider;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ReplicaPosition;
 import org.apache.solr.common.util.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static java.util.Collections.EMPTY_MAP;
 import static java.util.Collections.emptyMap;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORES;
 
@@ -448,6 +451,22 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
 
   }
 
+  public void testCreateCollectionWithEmptyPolicy() throws IOException {
+    Map m = (Map) loadFromResource("testCreateCollectionWithEmptyPolicy.json");
+    SolrCloudManager cloudManagerFromDiagnostics = createCloudManagerFromDiagnostics(m);
+    AutoScalingConfig autoScalingConfig = new AutoScalingConfig(new HashMap());
+    ///Users/noble/work/4solr/solr/core/src/test/org/apache/solr/handler/V2ApiIntegrationTest.java
+    //POSITIONS : [shard1:1[NRT] @127.0.0.1:49469_solr, shard1:2[NRT] @127.0.0.1:49469_solr]
+    List<ReplicaPosition> positions = PolicyHelper.getReplicaLocations("coll_new", autoScalingConfig, cloudManagerFromDiagnostics,
+        EMPTY_MAP, Collections.singletonList("shard1"), 2, 0, 0, null);
+
+    List<String> nodes = positions.stream().map(count -> count.node).collect(Collectors.toList());
+    assertTrue(nodes.contains("127.0.0.1:49469_solr"));
+    assertTrue(nodes.contains("127.0.0.1:49470_solr"));
+
+
+  }
+
   public static Object loadFromResource(String file) throws IOException {
     try (InputStream is = TestPolicy2.class.getResourceAsStream("/solrj/solr/autoscaling/" + file)) {
       return Utils.fromJSON(is);


[47/50] [abbrv] lucene-solr:jira/http2: SOLR-12876: remove @BadApple from ShardParamsTest.testGetShardsTolerantAsBool

Posted by da...@apache.org.
SOLR-12876: remove @BadApple from ShardParamsTest.testGetShardsTolerantAsBool


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/95af8d6a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/95af8d6a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/95af8d6a

Branch: refs/heads/jira/http2
Commit: 95af8d6ac20a46f940dcb93c6183f32aa5181a2d
Parents: 7fa19d2
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue Oct 16 10:52:51 2018 -0400
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue Oct 16 10:52:51 2018 -0400

----------------------------------------------------------------------
 .../src/test/org/apache/solr/common/params/ShardParamsTest.java     | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/95af8d6a/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java b/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
index b17f217..b300a0f 100644
--- a/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/params/ShardParamsTest.java
@@ -52,7 +52,6 @@ public class ShardParamsTest extends LuceneTestCase
   public void testDistribSinglePass() { assertEquals(ShardParams.DISTRIB_SINGLE_PASS, "distrib.singlePass"); }
 
   @Test
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testGetShardsTolerantAsBool() {
     ModifiableSolrParams params = new ModifiableSolrParams();
     // shards.tolerant param is not set; default should be false


[32/50] [abbrv] lucene-solr:jira/http2: LUCENE-8523: Fix javadocs in JapaneseNumberFilterFactory

Posted by da...@apache.org.
LUCENE-8523: Fix javadocs in JapaneseNumberFilterFactory


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e14bacfa
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e14bacfa
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e14bacfa

Branch: refs/heads/jira/http2
Commit: e14bacfac48501f827997ba0ac8cb20702834fef
Parents: 6d1b2e2
Author: Alan Woodward <ro...@apache.org>
Authored: Sun Oct 14 15:18:08 2018 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Sun Oct 14 15:18:14 2018 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                                              | 5 +++++
 .../apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java  | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e14bacfa/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 80d341e..941e603 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -200,6 +200,11 @@ New Features
   may be used to determine how to split the inner nodes, and dimensions N+1 to D
   are ignored and stored as data dimensions at the leaves. (Nick Knize)
 
+Other:
+
+* LUCENE-8523: Correct typo in JapaneseNumberFilterFactory javadocs (Ankush Jhalani
+  via Alan Woodward)
+
 ======================= Lucene 7.5.1 =======================
 
 Bug Fixes:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e14bacfa/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java
index 32c5df7..471f6cb 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java
@@ -29,7 +29,7 @@ import org.apache.lucene.analysis.util.TokenFilterFactory;
  * &lt;fieldType name="text_ja" class="solr.TextField"&gt;
  *   &lt;analyzer&gt;
  *     &lt;tokenizer class="solr.JapaneseTokenizerFactory" discardPunctuation="false"/&gt;
- *     &lt;filter class="solr.JapaneseNumberFilter"/&gt;
+ *     &lt;filter class="solr.JapaneseNumberFilterFactory"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;
  * </pre>


[38/50] [abbrv] lucene-solr:jira/http2: SOLR-12862: Add log10 Stream Evaluator and allow the pow Stream Evaluator to accept a vector of exponents

Posted by da...@apache.org.
SOLR-12862: Add log10 Stream Evaluator and allow the pow Stream Evaluator to accept a vector of exponents
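
The new MathExpressionTest cases below exercise the evaluators along these lines (a sketch; the exact assertions are in the diff):

    let(echo=true,
        a=array(10, 20, 30),
        b=log10(a),
        c=pow(a, 2),
        d=pow(a, array(1, 2, 3)))

Here b returns the element-wise base-10 logarithms of a, c raises each element to the scalar exponent 2, and d pairs each element of a with the matching exponent in the second vector, yielding [10, 400, 27000]; if the two vectors differ in length, pow now throws an IOException.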


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6c0fbe5a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6c0fbe5a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6c0fbe5a

Branch: refs/heads/jira/http2
Commit: 6c0fbe5a9d544060c42c4a1ec241a71c47d14bb8
Parents: 1ccd555
Author: Joel Bernstein <jb...@apache.org>
Authored: Mon Oct 15 15:09:40 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Mon Oct 15 15:47:03 2018 -0400

----------------------------------------------------------------------
 .../org/apache/solr/client/solrj/io/Lang.java   |  1 +
 .../client/solrj/io/eval/Log10Evaluator.java    | 50 +++++++++++++++++
 .../client/solrj/io/eval/PowerEvaluator.java    | 55 ++++++++++++++----
 .../solrj/io/stream/MathExpressionTest.java     | 59 ++++++++++++++++++++
 4 files changed, 153 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c0fbe5a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
index 75131ca..7cc842f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/Lang.java
@@ -303,6 +303,7 @@ public class Lang {
         .withFunctionName("mult", MultiplyEvaluator.class)
         .withFunctionName("sub", SubtractEvaluator.class)
         .withFunctionName("log", NaturalLogEvaluator.class)
+        .withFunctionName("log10", Log10Evaluator.class)
         .withFunctionName("pow", PowerEvaluator.class)
         .withFunctionName("mod", ModuloEvaluator.class)
         .withFunctionName("ceil", CeilingEvaluator.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c0fbe5a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Log10Evaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Log10Evaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Log10Evaluator.java
new file mode 100644
index 0000000..d8bc95d
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/Log10Evaluator.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.stream.Collectors;
+
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class Log10Evaluator extends RecursiveNumericEvaluator implements OneValueWorker {
+  protected static final long serialVersionUID = 1L;
+
+  public Log10Evaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    if(1 != containedEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting exactly 1 value but found %d",expression,containedEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Object doWork(Object value){
+    if(null == value){
+      return null;
+    }
+    else if(value instanceof List){
+      return ((List<?>)value).stream().map(innerValue -> doWork(innerValue)).collect(Collectors.toList());
+    }
+    else{
+      return Math.log10(((Number)value).doubleValue());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c0fbe5a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java
index 38d71c0..4de0906 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.solr.client.solrj.io.eval;
 
 import java.io.IOException;
@@ -27,7 +28,7 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 public class PowerEvaluator extends RecursiveNumericEvaluator implements TwoValueWorker {
   protected static final long serialVersionUID = 1L;
   
-  public PowerEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+  public PowerEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
     super(expression, factory);
 
     if(2 != containedEvaluators.size()){
@@ -36,26 +37,56 @@ public class PowerEvaluator extends RecursiveNumericEvaluator implements TwoValu
   }
 
   @Override
-  public Object doWork(Object first, Object second) throws IOException{
+  public Object doWork(Object first, Object second) throws IOException {
     
-    if(null == first || null == second){
+    if(null == first || null == second) {
       return null;
     }
 
     if(first instanceof Number) {
       Number value = (Number) first;
-      Number exponent = (Number) second;
-      return Math.pow(value.doubleValue(), exponent.doubleValue());
-    } else {
+      if(second instanceof Number) {
+        Number exponent = (Number) second;
+        return Math.pow(value.doubleValue(), exponent.doubleValue());
+      } else if(second instanceof List)  {
+        List<Number> exponents = (List<Number>) second;
+        List<Number> pows = new ArrayList();
+        for(Number exponent : exponents) {
+          pows.add(Math.pow(value.doubleValue(), exponent.doubleValue()));
+        }
+        return pows;
+      } else {
+        throw new IOException("The second parameter to the pow function must either be a scalar or list of scalars");
+      }
+    } else if(first instanceof List) {
       List<Number> values = (List<Number>) first;
-      Number exponent = (Number) second;
+      if(second instanceof Number) {
+        Number exponent = (Number) second;
 
-      List<Number> out = new ArrayList(values.size());
-      for(Number value : values) {
-        out.add(Math.pow(value.doubleValue(), exponent.doubleValue()));
-      }
+        List<Number> out = new ArrayList(values.size());
+        for (Number value : values) {
+          out.add(Math.pow(value.doubleValue(), exponent.doubleValue()));
+        }
+
+        return out;
+      } else if(second instanceof List) {
 
-      return out;
+        List<Number> out = new ArrayList(values.size());
+        List<Number> exponents = (List<Number>)second;
+        if(values.size() != exponents.size()) {
+          throw new IOException("The pow function requires vectors of equal size if two vectors are provided.");
+        }
+
+        for(int i=0; i<exponents.size(); i++) {
+          out.add(Math.pow(values.get(i).doubleValue(), exponents.get(i).doubleValue()));
+        }
+
+        return out;
+      } else {
+        throw new IOException("The second parameter to the pow function must either be a scalar or list of scalars");
+      }
+    } else {
+      throw new IOException("The first parameter to the pow function must either be a scalar or list of scalars");
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c0fbe5a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
index 45633e3..78fc2ce 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
@@ -1761,6 +1761,65 @@ public class MathExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
+  public void testLog10() throws Exception {
+    String cexpr = "let(echo=true, a=array(10, 20, 30), b=log10(a), c=log10(30.5))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertEquals(tuples.size(), 1);
+    Tuple tuple = tuples.get(0);
+    List<Number> logs = (List<Number>)tuple.get("b");
+    assertEquals(logs.size(), 3);
+    assertEquals(logs.get(0).doubleValue(), 1, 0.0);
+    assertEquals(logs.get(1).doubleValue(), 1.3010299956639813, 0.0);
+    assertEquals(logs.get(2).doubleValue(), 1.4771212547196624, 0.0);
+
+    Number log = (Number)tuple.get("c");
+    assertEquals(log.doubleValue(), 1.4842998393467859, 0.0);
+  }
+
+  @Test
+  public void testPow() throws Exception {
+    String cexpr = "let(echo=true, a=array(10, 20, 30), b=pow(a, 2), c=pow(2, a), d=pow(10, 3), e=pow(a, array(1, 2, 3)))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertEquals(tuples.size(), 1);
+    Tuple tuple = tuples.get(0);
+    List<Number> pows = (List<Number>)tuple.get("b");
+    assertEquals(pows.size(), 3);
+    assertEquals(pows.get(0).doubleValue(), 100, 0.0);
+    assertEquals(pows.get(1).doubleValue(), 400, 0.0);
+    assertEquals(pows.get(2).doubleValue(), 900, 0.0);
+
+    pows = (List<Number>)tuple.get("c");
+    assertEquals(pows.size(), 3);
+    assertEquals(pows.get(0).doubleValue(), 1024, 0.0);
+    assertEquals(pows.get(1).doubleValue(), 1048576, 0.0);
+    assertEquals(pows.get(2).doubleValue(), 1073741824, 0.0);
+
+    double p = tuple.getDouble("d");
+    assertEquals(p, 1000, 0.0);
+
+    pows = (List<Number>)tuple.get("e");
+    assertEquals(pows.size(), 3);
+    assertEquals(pows.get(0).doubleValue(), 10, 0.0);
+    assertEquals(pows.get(1).doubleValue(), 400, 0.0);
+    assertEquals(pows.get(2).doubleValue(), 27000, 0.0);
+
+  }
+
+  @Test
   public void testTermVectors() throws Exception {
     // Test termVectors with only documents and default termVector settings
     String cexpr = "let(echo=true," +


[13/50] [abbrv] lucene-solr:jira/http2: SOLR-12729: SplitShardCmd should lock the parent shard to prevent parallel splitting requests.

Posted by da...@apache.org.
SOLR-12729: SplitShardCmd should lock the parent shard to prevent parallel splitting requests.
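
A sketch of the effect, assuming a hypothetical collection named myColl: while a SPLITSHARD request for myColl/shard1 is still in progress, a second Collections API call such as

    /admin/collections?action=SPLITSHARD&collection=myColl&shard=shard1

now fails with an INVALID_STATE error ("Can't lock parent slice for splitting (another split operation running?): myColl/shard1") instead of running a second split against the same parent shard.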


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/23e22e64
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/23e22e64
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/23e22e64

Branch: refs/heads/jira/http2
Commit: 23e22e6460a28d821fd4587410dbde80dd8bce62
Parents: a52d47a
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Tue Oct 9 21:29:38 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Tue Oct 9 21:30:10 2018 +0200

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../cloud/api/collections/SplitShardCmd.java    | 37 +++++++++++-
 .../autoscaling/InactiveShardPlanAction.java    | 48 ++++++++++++++-
 .../solr/cloud/overseer/ReplicaMutator.java     |  9 +++
 .../solr/cloud/overseer/SliceMutator.java       | 14 ++---
 .../org/apache/solr/util/TestInjection.java     | 17 ++++++
 .../cloud/api/collections/ShardSplitTest.java   | 61 ++++++++++++++++++++
 .../ScheduledMaintenanceTriggerTest.java        | 47 +++++++++++----
 .../sim/SimClusterStateProvider.java            | 37 +++++++-----
 .../autoscaling/sim/TestSimExtremeIndexing.java |  4 ++
 10 files changed, 243 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9ca6cf2..6fac318 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -182,6 +182,8 @@ Bug Fixes
 * SOLR-12836: ZkController creates a cloud solr client with no connection or read timeouts. Now the http client
   created by the update shard handler is used instead. (shalin)
 
+* SOLR-12729: SplitShardCmd should lock the parent shard to prevent parallel splitting requests. (ab)
+
 Improvements
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index bac45ab..2e68f91 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -61,6 +61,7 @@ import org.apache.solr.handler.component.ShardHandler;
 import org.apache.solr.update.SolrIndexSplitter;
 import org.apache.solr.util.RTimerTree;
 import org.apache.solr.util.TestInjection;
+import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
@@ -116,7 +117,8 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
 
     Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
     if (parentSlice.getState() != Slice.State.ACTIVE) {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Parent slice is not active: " + parentSlice.getState());
+      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Parent slice is not active: " +
+          collectionName + "/ " + parentSlice.getName() + ", state=" + parentSlice.getState());
     }
 
     // find the leader for the shard
@@ -172,6 +174,14 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
             parentShardLeader.getType());
       }
 
+      // check for the lock
+      if (!lockForSplit(ocmh.cloudManager, collectionName, parentSlice.getName())) {
+        // mark as success to avoid clearing the lock in the "finally" block
+        success = true;
+        throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
+            collectionName + "/" + parentSlice.getName());
+      }
+
       List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
 
       t = timings.sub("fillRanges");
@@ -502,6 +512,8 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
         results.add(CommonParams.TIMING, timings.asNamedList());
       }
       success = true;
+      // don't unlock the shard yet - only do this if the final switch-over in
+      // ReplicaMutator succeeds (or fails)
       return true;
     } catch (SolrException e) {
       throw e;
@@ -512,6 +524,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
       if (sessionWrapper != null) sessionWrapper.release();
       if (!success) {
         cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices, offlineSlices);
+        unlockForSplit(ocmh.cloudManager, collectionName, parentSlice.getName());
       }
     }
   }
@@ -740,4 +753,26 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
     }
     return rangesStr;
   }
+
+  public static boolean lockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
+    String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
+    if (cloudManager.getDistribStateManager().hasData(path)) {
+      return false;
+    }
+    Map<String, Object> map = new HashMap<>();
+    map.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+    byte[] data = Utils.toJSON(map);
+    try {
+      cloudManager.getDistribStateManager().makePath(path, data, CreateMode.EPHEMERAL, true);
+    } catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
+          collection + "/" + shard, e);
+    }
+    return true;
+  }
+
+  public static void unlockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
+    String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
+    cloudManager.getDistribStateManager().removeRecursively(path, true, true);
+  }
 }
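
The practical effect of lockForSplit/unlockForSplit above is that a second SPLITSHARD request against the same parent shard now fails fast while the ephemeral "<shard>-splitting" znode exists under /collections/<collection>, instead of racing the split that is already running. A rough SolrJ sketch of that observable behavior (the collection name, shard name, and the existing solrClient instance are placeholders, not taken from this commit):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    // First split: runs asynchronously and holds the "-splitting" lock znode
    // until the final state switch in ReplicaMutator completes.
    CollectionAdminRequest.SplitShard first =
        CollectionAdminRequest.splitShard("myCollection").setShardName("shard1");
    first.processAsync("split-1", solrClient);

    // Second split of the same parent shard while the first is still running:
    // expected to be rejected with an INVALID_STATE error
    // ("Can't lock parent slice for splitting (another split operation running?)").
    try {
      CollectionAdminRequest.splitShard("myCollection")
          .setShardName("shard1")
          .process(solrClient);
    } catch (Exception e) {
      // overlapping request rejected while the lock is held
    }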

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
index cb561f5..6fca29a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
@@ -21,7 +21,9 @@ import java.util.ArrayList;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.NoSuchElementException;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
@@ -29,6 +31,7 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.SolrResourceLoader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -74,6 +77,7 @@ public class InactiveShardPlanAction extends TriggerActionBase {
     ClusterState state = cloudManager.getClusterStateProvider().getClusterState();
     Map<String, List<String>> cleanup = new LinkedHashMap<>();
     Map<String, List<String>> inactive = new LinkedHashMap<>();
+    Map<String, Map<String, Object>> staleLocks = new LinkedHashMap<>();
     state.forEachCollection(coll ->
       coll.getSlices().forEach(s -> {
         if (Slice.State.INACTIVE.equals(s.getState())) {
@@ -94,12 +98,54 @@ public class InactiveShardPlanAction extends TriggerActionBase {
             cleanup.computeIfAbsent(coll.getName(), c -> new ArrayList<>()).add(s.getName());
           }
         }
+        // check for stale shard split locks
+        String parentPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll.getName();
+        List<String> locks;
+        try {
+          locks = cloudManager.getDistribStateManager().listData(parentPath).stream()
+              .filter(name -> name.endsWith("-splitting"))
+              .collect(Collectors.toList());
+          for (String lock : locks) {
+            try {
+              String lockPath = parentPath + "/" + lock;
+              Map<String, Object> lockData = Utils.getJson(cloudManager.getDistribStateManager(), lockPath);
+              String tstampStr = (String)lockData.get(ZkStateReader.STATE_TIMESTAMP_PROP);
+              if (tstampStr == null || tstampStr.isEmpty()) {
+                return;
+              }
+              long timestamp = Long.parseLong(tstampStr);
+              // this timestamp uses epoch time
+              long currentTime = cloudManager.getTimeSource().getEpochTimeNs();
+              long delta = TimeUnit.NANOSECONDS.toSeconds(currentTime - timestamp);
+              log.debug("{}/{}: locktstamp={}, time={}, delta={}", coll.getName(), lock, timestamp, currentTime, delta);
+              if (delta > cleanupTTL) {
+                log.debug("-- delete inactive split lock for {}/{}, delta={}", coll.getName(), lock, delta);
+                cloudManager.getDistribStateManager().removeData(lockPath, -1);
+                lockData.put("currentTimeNs", currentTime);
+                lockData.put("deltaSec", delta);
+                lockData.put("ttlSec", cleanupTTL);
+                staleLocks.put(coll.getName() + "/" + lock, lockData);
+              } else {
+                log.debug("-- lock " + coll.getName() + "/" + lock + " still active (delta=" + delta + ")");
+              }
+            } catch (NoSuchElementException nse) {
+              // already removed by someone else - ignore
+            }
+          }
+        } catch (Exception e) {
+          log.warn("Exception checking for inactive shard split locks in " + parentPath, e);
+        }
       })
     );
+    Map<String, Object> results = new LinkedHashMap<>();
     if (!cleanup.isEmpty()) {
-      Map<String, Object> results = new LinkedHashMap<>();
       results.put("inactive", inactive);
       results.put("cleanup", cleanup);
+    }
+    if (!staleLocks.isEmpty()) {
+      results.put("staleLocks", staleLocks);
+    }
+    if (!results.isEmpty()) {
       context.getProperties().put(getName(), results);
     }
   }
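
Besides cleaning up inactive parent shards, the action above now also reaps "-splitting" lock znodes whose STATE_TIMESTAMP is older than the configured ttl, and reports them under a "staleLocks" key in its results. A sketch of the trigger configuration that drives it, following the same shape as the ScheduledMaintenanceTriggerTest change later in this commit (the trigger name and TTL value here are illustrative):

    // Hedged sketch: schedule InactiveShardPlanAction with a 20 second TTL.
    // Inactive shards and stale split locks older than the TTL are cleaned up
    // and reported as "inactive"/"cleanup" and "staleLocks" respectively.
    String setTriggerCommand = "{" +
        "'set-trigger' : {" +
        "'name' : 'scheduled_maintenance'," +
        "'event' : 'scheduled'," +
        "'startTime' : 'NOW+10SECONDS'," +
        "'every' : '+2SECONDS'," +
        "'enabled' : true," +
        "'actions' : [{'name' : 'inactive_shard_plan', 'class' : 'solr.InactiveShardPlanAction', 'ttl' : '20'}," +
        "{'name' : 'execute_plan', 'class' : 'org.apache.solr.cloud.autoscaling.ExecutePlanAction'}]" +
        "}}";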

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index 6cbdbfb..68a42b9 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -35,6 +35,7 @@ import org.apache.solr.cloud.CloudUtil;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.api.collections.Assign;
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.SplitShardCmd;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -43,6 +44,7 @@ import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TestInjection;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -476,6 +478,13 @@ public class ReplicaMutator {
                 propMap.put(subShardSlice.getName(), Slice.State.RECOVERY_FAILED.toString());
               }
             }
+            TestInjection.injectSplitLatch();
+            try {
+              SplitShardCmd.unlockForSplit(cloudManager, collection.getName(), parentSliceName);
+            } catch (Exception e) {
+              log.warn("Failed to unlock shard after " + (isLeaderSame ? "" : "un") + "successful split: {} / {}",
+                  collection.getName(), parentSliceName);
+            }
             ZkNodeProps m = new ZkNodeProps(propMap);
             return new SliceMutator(cloudManager).updateShardState(prevState, m).collection;
           }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
index 8aa2331..c0a8a7b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
@@ -49,12 +49,12 @@ public class SliceMutator {
 
   public static final Set<String> SLICE_UNIQUE_BOOLEAN_PROPERTIES = ImmutableSet.of(PREFERRED_LEADER_PROP);
 
-  protected final SolrCloudManager dataProvider;
+  protected final SolrCloudManager cloudManager;
   protected final DistribStateManager stateManager;
 
-  public SliceMutator(SolrCloudManager dataProvider) {
-    this.dataProvider = dataProvider;
-    this.stateManager = dataProvider.getDistribStateManager();
+  public SliceMutator(SolrCloudManager cloudManager) {
+    this.cloudManager = cloudManager;
+    this.stateManager = cloudManager.getDistribStateManager();
   }
 
   public ZkWriteCommand addReplica(ClusterState clusterState, ZkNodeProps message) {
@@ -140,9 +140,9 @@ public class SliceMutator {
       String coreURL = ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), replica.getStr(ZkStateReader.CORE_NAME_PROP));
 
       if (replica == oldLeader && !coreURL.equals(leaderUrl)) {
-        replica = new ReplicaMutator(dataProvider).unsetLeader(replica);
+        replica = new ReplicaMutator(cloudManager).unsetLeader(replica);
       } else if (coreURL.equals(leaderUrl)) {
-        replica = new ReplicaMutator(dataProvider).setLeader(replica);
+        replica = new ReplicaMutator(cloudManager).setLeader(replica);
       }
 
       newReplicas.put(replica.getName(), replica);
@@ -179,7 +179,7 @@ public class SliceMutator {
       }
       props.put(ZkStateReader.STATE_PROP, message.getStr(key));
       // we need to use epoch time so that it's comparable across Overseer restarts
-      props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(dataProvider.getTimeSource().getEpochTimeNs()));
+      props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
       Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
       slicesCopy.put(slice.getName(), newSlice);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/java/org/apache/solr/util/TestInjection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/TestInjection.java b/solr/core/src/java/org/apache/solr/util/TestInjection.java
index 4642eac..588cfcb 100644
--- a/solr/core/src/java/org/apache/solr/util/TestInjection.java
+++ b/solr/core/src/java/org/apache/solr/util/TestInjection.java
@@ -24,6 +24,8 @@ import java.util.Random;
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -135,6 +137,8 @@ public class TestInjection {
 
   public static String splitFailureAfterReplicaCreation = null;
 
+  public static CountDownLatch splitLatch = null;
+
   public static String waitForReplicasInSync = "true:60";
 
   public static String failIndexFingerprintRequests = null;
@@ -159,6 +163,7 @@ public class TestInjection {
     randomDelayInCoreCreation = null;
     splitFailureBeforeReplicaCreation = null;
     splitFailureAfterReplicaCreation = null;
+    splitLatch = null;
     prepRecoveryOpPauseForever = null;
     countPrepRecoveryOpPauseForever = new AtomicInteger(0);
     waitForReplicasInSync = "true:60";
@@ -413,6 +418,18 @@ public class TestInjection {
     return injectSplitFailure(splitFailureAfterReplicaCreation, "after creating replica for sub-shard");
   }
 
+  public static boolean injectSplitLatch() {
+    if (splitLatch != null) {
+      try {
+        log.info("Waiting in ReplicaMutator for up to 60s");
+        return splitLatch.await(60, TimeUnit.SECONDS);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+    return true;
+  }
+
   @SuppressForbidden(reason = "Need currentTimeMillis, because COMMIT_TIME_MSEC_KEY use currentTimeMillis as value")
   public static boolean waitForInSyncWithLeader(SolrCore core, ZkController zkController, String collection, String shardId) {
     if (waitForReplicasInSync == null) return true;
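
The new splitLatch hook lets a test hold the Overseer inside ReplicaMutator, just before the final shard-state switch, for up to 60 seconds; that is what keeps a split "in progress" long enough to observe the lock znode. The testSplitLocking change below exercises it; the basic pattern is:

    // Hedged sketch of the injection pattern used by the test below.
    TestInjection.splitLatch = new CountDownLatch(1);  // ReplicaMutator blocks on this latch
    // ... issue SPLITSHARD in a background thread, assert the "-splitting" znode exists,
    //     and verify that a second split request is rejected ...
    TestInjection.splitLatch.countDown();              // allow the first split to complete
    TestInjection.reset();                             // always clear injection state afterwards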

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index cd87bb5..abd887c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -30,6 +30,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
@@ -60,10 +61,12 @@ import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.update.SolrIndexSplitter;
 import org.apache.solr.util.LogLevel;
 import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -541,6 +544,64 @@ public class ShardSplitTest extends AbstractFullDistribZkTestBase {
   }
 
   @Test
+  public void testSplitLocking() throws Exception {
+    waitForThingsToLevelOut(15);
+    String collectionName = "testSplitLocking";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2);
+    create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
+    create.process(cloudClient);
+    waitForRecoveriesToFinish(collectionName, false);
+
+    TestInjection.splitLatch = new CountDownLatch(1); // simulate a long split operation
+    String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName + "/" + SHARD1 + "-splitting";
+    final AtomicReference<Exception> exc = new AtomicReference<>();
+    try {
+      Runnable r = () -> {
+        try {
+          trySplit(collectionName, null, SHARD1, 1);
+        } catch (Exception e) {
+          exc.set(e);
+        }
+      };
+      Thread t = new Thread(r);
+      t.start();
+      // wait for the split to start executing
+      TimeOut timeOut = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+      while (!timeOut.hasTimedOut()) {
+        timeOut.sleep(500);
+        if (cloudClient.getZkStateReader().getZkClient().exists(path, true)) {
+          log.info("=== found lock node");
+          break;
+        }
+      }
+      assertFalse("timed out waiting for the lock znode to appear", timeOut.hasTimedOut());
+      assertNull("unexpected exception: " + exc.get(), exc.get());
+      log.info("=== trying second split");
+      try {
+        trySplit(collectionName, null, SHARD1, 1);
+        fail("expected to fail due to locking but succeeded");
+      } catch (Exception e) {
+        log.info("Expected failure: " + e.toString());
+      }
+
+      // make sure the lock still exists
+      assertTrue("lock znode expected but missing", cloudClient.getZkStateReader().getZkClient().exists(path, true));
+      // let the first split proceed
+      TestInjection.splitLatch.countDown();
+      timeOut = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+      while (!timeOut.hasTimedOut()) {
+        timeOut.sleep(500);
+        if (!cloudClient.getZkStateReader().getZkClient().exists(path, true)) {
+          break;
+        }
+      }
+      assertFalse("timed out waiting for the lock znode to disappear", timeOut.hasTimedOut());
+    } finally {
+      TestInjection.reset();
+    }
+  }
+
+  @Test
   public void testSplitShardWithRule() throws Exception {
     doSplitShardWithRule(SolrIndexSplitter.SplitMethod.REWRITE);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
index 36f4a13..b51d216 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledMaintenanceTriggerTest.java
@@ -19,6 +19,7 @@ package org.apache.solr.cloud.autoscaling;
 
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -35,10 +36,13 @@ import org.apache.solr.cloud.CloudTestUtils;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.cloud.autoscaling.sim.SimCloudManager;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.util.LogLevel;
+import org.apache.zookeeper.CreateMode;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -167,6 +171,17 @@ public class ScheduledMaintenanceTriggerTest extends SolrCloudTestCase {
     CloudTestUtils.waitForState(cloudManager, "failed to create " + collection1, collection1,
         CloudTestUtils.clusterShape(1, 1));
 
+    // also create a very stale lock
+    Map<String, Object> lockData = new HashMap<>();
+    lockData.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs() -
+        TimeUnit.NANOSECONDS.convert(48, TimeUnit.HOURS)));
+    String staleLockName = collection1 + "/staleShard-splitting";
+    cloudManager.getDistribStateManager().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" +
+        staleLockName, Utils.toJSON(lockData), CreateMode.EPHEMERAL, true);
+
+    // expect two events - one for a very stale lock, one for the cleanup
+    triggerFired = new CountDownLatch(2);
+
     String setListenerCommand = "{" +
         "'set-listener' : " +
         "{" +
@@ -186,10 +201,10 @@ public class ScheduledMaintenanceTriggerTest extends SolrCloudTestCase {
         "'set-trigger' : {" +
         "'name' : '" + AutoScaling.SCHEDULED_MAINTENANCE_TRIGGER_NAME + "'," +
         "'event' : 'scheduled'," +
-        "'startTime' : 'NOW+3SECONDS'," +
+        "'startTime' : 'NOW+10SECONDS'," +
         "'every' : '+2SECONDS'," +
         "'enabled' : true," +
-        "'actions' : [{'name' : 'inactive_shard_plan', 'class' : 'solr.InactiveShardPlanAction', 'ttl' : '10'}," +
+        "'actions' : [{'name' : 'inactive_shard_plan', 'class' : 'solr.InactiveShardPlanAction', 'ttl' : '20'}," +
         "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}," +
         "{'name' : 'test', 'class' : '" + TestTriggerAction.class.getName() + "'}]" +
         "}}";
@@ -208,7 +223,7 @@ public class ScheduledMaintenanceTriggerTest extends SolrCloudTestCase {
         CloudTestUtils.clusterShape(3, 1, true, true));
 
 
-    await = triggerFired.await(60, TimeUnit.SECONDS);
+    await = triggerFired.await(90, TimeUnit.SECONDS);
     assertTrue("cleanup action didn't run", await);
 
     // cleanup should have occurred
@@ -217,21 +232,27 @@ public class ScheduledMaintenanceTriggerTest extends SolrCloudTestCase {
     listenerEvents.clear();
 
     assertFalse(events.isEmpty());
-    int inactiveEvents = 0;
     CapturedEvent ce = null;
+    CapturedEvent staleLock = null;
     for (CapturedEvent e : events) {
       if (e.stage != TriggerEventProcessorStage.AFTER_ACTION) {
         continue;
       }
-      if (e.context.containsKey("properties.inactive_shard_plan")) {
+      Map<String, Object> plan = (Map<String, Object>)e.context.get("properties.inactive_shard_plan");
+      if (plan == null) {
+        continue;
+      }
+      if (plan.containsKey("cleanup")) {
         ce = e;
-        break;
-      } else {
-        inactiveEvents++;
+      }
+      // capture only the first
+      if (plan.containsKey("staleLocks") && staleLock == null) {
+        staleLock = e;
       }
     }
-    assertTrue("should be at least one inactive event", inactiveEvents > 0);
-    assertNotNull("missing cleanup event", ce);
+    assertNotNull("missing cleanup event: " + events, ce);
+    assertNotNull("missing staleLocks event: " + events, staleLock);
+
     Map<String, Object> map = (Map<String, Object>)ce.context.get("properties.inactive_shard_plan");
     assertNotNull(map);
 
@@ -242,6 +263,12 @@ public class ScheduledMaintenanceTriggerTest extends SolrCloudTestCase {
     assertEquals(1, cleanup.size());
     assertNotNull(cleanup.get(collection1));
 
+    map = (Map<String, Object>)staleLock.context.get("properties.inactive_shard_plan");
+    assertNotNull(map);
+    Map<String, Map<String, Object>> locks = (Map<String, Map<String, Object>>)map.get("staleLocks");
+    assertNotNull(locks);
+    assertTrue("missing stale lock data: " + locks + "\nevents: " + events, locks.containsKey(staleLockName));
+
     ClusterState state = cloudManager.getClusterStateProvider().getClusterState();
 
     CloudTestUtils.clusterShape(2, 1).matches(state.getLiveNodes(), state.getCollection(collection1));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 1d377b6..43e12ce 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -1151,12 +1151,12 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Shard " + collectionName +
           " /  " + sliceName.get() + " has no leader and can't be split");
     }
+    SplitShardCmd.lockForSplit(cloudManager, collectionName, sliceName.get());
     // start counting buffered updates
     Map<String, Object> props = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
         .computeIfAbsent(sliceName.get(), ss -> new ConcurrentHashMap<>());
     if (props.containsKey(BUFFERED_UPDATES)) {
-      log.debug("--- SOLR-12729: Overlapping splitShard commands for {} / {}", collectionName, sliceName.get());
-      return;
+      throw new Exception("--- SOLR-12729: Overlapping splitShard commands for " + collectionName + "/" + sliceName.get());
     }
     props.put(BUFFERED_UPDATES, new AtomicLong());
 
@@ -1240,20 +1240,28 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     // delay it once again to better simulate replica recoveries
     //opDelay(collectionName, CollectionParams.CollectionAction.SPLITSHARD.name());
 
-    CloudTestUtils.waitForState(cloudManager, collectionName, 30, TimeUnit.SECONDS, (liveNodes, state) -> {
-      for (String subSlice : subSlices) {
-        Slice s = state.getSlice(subSlice);
-        if (s.getLeader() == null) {
-          log.debug("** no leader in {} / {}", collectionName, s);
-          return false;
-        }
-        if (s.getReplicas().size() < repFactor) {
-          log.debug("** expected {} repFactor but there are {} replicas", repFactor, s.getReplicas().size());
-          return false;
+    boolean success = false;
+    try {
+      CloudTestUtils.waitForState(cloudManager, collectionName, 30, TimeUnit.SECONDS, (liveNodes, state) -> {
+        for (String subSlice : subSlices) {
+          Slice s = state.getSlice(subSlice);
+          if (s.getLeader() == null) {
+            log.debug("** no leader in {} / {}", collectionName, s);
+            return false;
+          }
+          if (s.getReplicas().size() < repFactor) {
+            log.debug("** expected {} repFactor but there are {} replicas", repFactor, s.getReplicas().size());
+            return false;
+          }
         }
+        return true;
+      });
+      success = true;
+    } finally {
+      if (!success) {
+        SplitShardCmd.unlockForSplit(cloudManager, collectionName, sliceName.get());
       }
-      return true;
-    });
+    }
     // mark the new slices as active and the old slice as inactive
     log.trace("-- switching slice states after split shard: collection={}, parent={}, subSlices={}", collectionName,
         sliceName.get(), subSlices);
@@ -1292,6 +1300,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       // invalidate cached state
       collectionsStatesRef.set(null);
     } finally {
+      SplitShardCmd.unlockForSplit(cloudManager, collectionName, sliceName.get());
       lock.unlock();
     }
     results.add("success", "");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23e22e64/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java
index ab5295e..aea7a5f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExtremeIndexing.java
@@ -66,6 +66,9 @@ public class TestSimExtremeIndexing extends SimSolrCloudTestCase {
   // tweak this threshold to test the number of splits
   private static final long ABOVE_SIZE = 20000000;
 
+  // tweak this to allow more operations in one event
+  private static final int MAX_OPS = 100;
+
 
   private static TimeSource timeSource;
   private static SolrClient solrClient;
@@ -100,6 +103,7 @@ public class TestSimExtremeIndexing extends SimSolrCloudTestCase {
         "'event' : 'indexSize'," +
         "'waitFor' : '" + waitForSeconds + "s'," +
         "'aboveDocs' : " + ABOVE_SIZE + "," +
+        "'maxOps' : " + MAX_OPS + "," +
         "'enabled' : true," +
         "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
         "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}]" +


[27/50] [abbrv] lucene-solr:jira/http2: SOLR-12851: Improvements and fixes to let and select Streaming Expressions

Posted by da...@apache.org.
SOLR-12851: Improvements and fixes to let and select Streaming Expressions


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a0bb5017
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a0bb5017
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a0bb5017

Branch: refs/heads/jira/http2
Commit: a0bb5017722ce698fc390f3990243697341d2b8d
Parents: c87778c
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Oct 11 10:43:30 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Oct 11 10:43:57 2018 -0400

----------------------------------------------------------------------
 .../solr/client/solrj/io/stream/LetStream.java  | 11 +++++-
 .../client/solrj/io/stream/SelectStream.java    | 11 ++++--
 .../solrj/io/stream/expr/StreamFactory.java     |  5 +--
 .../solrj/io/stream/MathExpressionTest.java     | 41 +++++++++++++++++++-
 4 files changed, 58 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a0bb5017/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
index e88eaf6..23881c3 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/LetStream.java
@@ -36,6 +36,7 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 /**
@@ -76,7 +77,11 @@ public class LetStream extends TupleStream implements Expressible {
       }
 
       StreamExpressionParameter param = ((StreamExpressionNamedParameter)np).getParameter();
-      if(factory.isEvaluator((StreamExpression)param)) {
+
+      if(param instanceof StreamExpressionValue) {
+        String paramValue = ((StreamExpressionValue) param).getValue();
+        letParams.put(name, factory.constructPrimitiveObject(paramValue));
+      } else if(factory.isEvaluator((StreamExpression)param)) {
         StreamEvaluator evaluator = factory.constructEvaluator((StreamExpression) param);
         letParams.put(name, evaluator);
       } else {
@@ -182,7 +187,7 @@ public class LetStream extends TupleStream implements Expressible {
         } finally {
           tStream.close();
         }
-      } else {
+      } else if(o instanceof StreamEvaluator) {
         //Add the data from the StreamContext to a tuple.
         //Let the evaluator works from this tuple.
         //This will allow columns to be created from tuples already in the StreamContext.
@@ -196,6 +201,8 @@ public class LetStream extends TupleStream implements Expressible {
         } else {
           lets.put(name, eo);
         }
+      } else {
+        lets.put(name, o);
       }
     }
     stream.open();
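
With the StreamExpressionValue branch above, let() can now bind plain literals (numbers, booleans, strings) directly to variables via constructPrimitiveObject, in addition to evaluators and streams. A small sketch of the resulting behavior, mirroring the testLetWithNumericVariables case added later in this commit (the base URL and collection name are placeholders):

    import java.io.IOException;
    import org.apache.solr.client.solrj.io.Tuple;
    import org.apache.solr.client.solrj.io.stream.SolrStream;
    import org.apache.solr.client.solrj.io.stream.StreamContext;
    import org.apache.solr.client.solrj.io.stream.TupleStream;
    import org.apache.solr.common.params.ModifiableSolrParams;

    // Hedged sketch: numeric literals bound by let() now come back as numbers.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("expr", "let(echo=true, a=1.88888, b=8888888888.98)");
    params.set("qt", "/stream");
    TupleStream stream = new SolrStream("http://localhost:8983/solr/myCollection", params);
    stream.setStreamContext(new StreamContext());
    try {
      stream.open();
      Tuple tuple = stream.read();         // echo=true returns the bound variables in one tuple
      Double a = tuple.getDouble("a");     // 1.88888
      Double b = tuple.getDouble("b");     // 8888888888.98
    } finally {
      stream.close();
    }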

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a0bb5017/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
index d6664cd..d87a637 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
@@ -19,6 +19,7 @@ package org.apache.solr.client.solrj.io.stream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -63,14 +64,14 @@ public class SelectStream extends TupleStream implements Expressible {
       this.selectedFields.put(selectedField, selectedField);
     }
     operations = new ArrayList<>();
-    selectedEvaluators = new HashMap<>();
+    selectedEvaluators = new LinkedHashMap();
   }
   
   public SelectStream(TupleStream stream, Map<String,String> selectedFields) throws IOException {
     this.stream = stream;
     this.selectedFields = selectedFields;
     operations = new ArrayList<>();
-    selectedEvaluators = new HashMap<>();
+    selectedEvaluators = new LinkedHashMap();
   }
   
   public SelectStream(StreamExpression expression,StreamFactory factory) throws IOException {
@@ -100,7 +101,7 @@ public class SelectStream extends TupleStream implements Expressible {
     stream = factory.constructStream(streamExpressions.get(0));
     
     selectedFields = new HashMap<String,String>();
-    selectedEvaluators = new HashMap<StreamEvaluator, String>();
+    selectedEvaluators = new LinkedHashMap();
     for(StreamExpressionParameter parameter : selectAsFieldsExpressions){
       StreamExpressionValue selectField = (StreamExpressionValue)parameter;
       String value = selectField.getValue().trim();
@@ -281,7 +282,9 @@ public class SelectStream extends TupleStream implements Expressible {
     
     // Apply all evaluators
     for(Map.Entry<StreamEvaluator, String> selectedEvaluator : selectedEvaluators.entrySet()) {
-      workingToReturn.put(selectedEvaluator.getValue(), selectedEvaluator.getKey().evaluate(workingForEvaluators));
+      Object o = selectedEvaluator.getKey().evaluate(workingForEvaluators);
+      workingForEvaluators.put(selectedEvaluator.getValue(), o);
+      workingToReturn.put(selectedEvaluator.getValue(), o);
     }
     
     return workingToReturn;
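
Two behavioral changes are visible above: selectedEvaluators is now a LinkedHashMap, so evaluators run in the order they were declared, and each evaluator's result is written back into the working tuple so later evaluators can reference it. That is what makes chained projections like the following work, mirroring the testSelectWithSequentialEvaluators case added later in this commit:

    // Hedged sketch: "blah" is computed first and is then visible to the
    // evaluator that computes "blah1" within the same select().
    String expr = "select(list(tuple(a=add(1,2)), tuple(a=add(2,2)))," +
        "                 add(1, a) as blah," +
        "                 add(1, blah) as blah1)";
    // For the tuple where a=3: blah=4, blah1=5; for a=4: blah=5, blah1=6.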

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a0bb5017/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
index 321545b..0ba5d5c 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
@@ -439,9 +439,8 @@ public class StreamFactory implements Serializable {
     if("null".equals(lower)){ return null; }
     if("true".equals(lower) || "false".equals(lower)){ return Boolean.parseBoolean(lower); }
     try{ return Long.valueOf(original); } catch(Exception ignored){};
-    try{ if (original.matches(".{1,8}")){ return Double.valueOf(original); }} catch(Exception ignored){};
-    try{ if (original.matches(".{1,17}")){ return Double.valueOf(original); }} catch(Exception ignored){};
-    
+    try{ return Double.valueOf(original); } catch(Exception ignored){};
+
     // is a string
     return original;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a0bb5017/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
index a095dd8..45633e3 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
@@ -1169,7 +1169,6 @@ public class MathExpressionTest extends SolrCloudTestCase {
     List<Tuple> tuples = getTuples(solrStream);
     assertTrue(tuples.size() == 1);
     List<List<Number>> out = (List<List<Number>>)tuples.get(0).get("c");
-    System.out.println("###### out:"+out);
     assertEquals(out.size(), 2);
     List<Number> row1 = out.get(0);
     assertEquals(row1.get(0).doubleValue(), 2.1, 0);
@@ -1722,6 +1721,46 @@ public class MathExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
+  public void testSelectWithSequentialEvaluators() throws Exception {
+    String cexpr = "select(list(tuple(a=add(1,2)), tuple(a=add(2,2))), " +
+        "                  add(1, a) as blah, " +
+        "                  add(1, blah) as blah1)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertEquals(tuples.size(), 2);
+    Tuple tuple0 = tuples.get(0);
+    assertEquals(tuple0.getLong("blah").longValue(), 4L);
+    assertEquals(tuple0.getLong("blah1").longValue(), 5L);
+
+    Tuple tuple1 = tuples.get(1);
+    assertEquals(tuple1.getLong("blah").longValue(), 5L);
+    assertEquals(tuple1.getLong("blah1").longValue(), 6L);
+  }
+
+  @Test
+  public void testLetWithNumericVariables() throws Exception {
+    String cexpr = "let(echo=true, a=1.88888, b=8888888888.98)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertEquals(tuples.size(), 1);
+    Tuple tuple = tuples.get(0);
+    assertEquals(tuple.getDouble("a").doubleValue(), 1.88888, 0.0);
+    assertEquals(tuple.getDouble("b").doubleValue(), 8888888888.98, 0.0);
+  }
+
+  @Test
   public void testTermVectors() throws Exception {
     // Test termVectors with only documents and default termVector settings
     String cexpr = "let(echo=true," +


[06/50] [abbrv] lucene-solr:jira/http2: LUCENE-8496: Selective indexing - modify BKDReader/BKDWriter to allow users to select a fewer number of dimensions to be used for creating the index than the total number of dimensions used for field encoding. i.e.

Posted by da...@apache.org.
LUCENE-8496: Selective indexing - modify BKDReader/BKDWriter to let users select fewer dimensions for building the index than the total number of dimensions used for field encoding, i.e., dimensions 0 to N determine how the inner nodes are split, while dimensions N+1 to D are ignored for splitting and are stored as data dimensions at the leaves.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1118299c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1118299c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1118299c

Branch: refs/heads/jira/http2
Commit: 1118299c338253cea09640acdc48dc930dc27fda
Parents: df07a43
Author: Nicholas Knize <nk...@gmail.com>
Authored: Mon Oct 8 18:51:03 2018 -0500
Committer: Nicholas Knize <nk...@gmail.com>
Committed: Mon Oct 8 18:51:03 2018 -0500

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   8 +
 .../codecs/simpletext/SimpleTextBKDReader.java  |  59 ++++---
 .../codecs/simpletext/SimpleTextBKDWriter.java  | 144 ++++++++-------
 .../simpletext/SimpleTextFieldInfosFormat.java  |  21 ++-
 .../simpletext/SimpleTextPointsReader.java      |  20 ++-
 .../simpletext/SimpleTextPointsWriter.java      |   6 +-
 .../org/apache/lucene/codecs/PointsWriter.java  |  13 +-
 .../lucene50/Lucene50FieldInfosFormat.java      |   2 +-
 .../lucene60/Lucene60FieldInfosFormat.java      |  21 ++-
 .../codecs/lucene60/Lucene60PointsReader.java   |   2 +-
 .../codecs/lucene60/Lucene60PointsWriter.java   |  14 +-
 .../codecs/perfield/PerFieldMergeState.java     |   2 +-
 .../org/apache/lucene/document/BinaryPoint.java |   4 +-
 .../org/apache/lucene/document/DoublePoint.java |  10 +-
 .../org/apache/lucene/document/DoubleRange.java |  10 +-
 .../org/apache/lucene/document/FieldType.java   |  66 +++++--
 .../org/apache/lucene/document/FloatPoint.java  |  10 +-
 .../org/apache/lucene/document/FloatRange.java  |  10 +-
 .../org/apache/lucene/document/IntPoint.java    |  10 +-
 .../org/apache/lucene/document/IntRange.java    |  10 +-
 .../org/apache/lucene/document/LatLonPoint.java |   6 +-
 .../org/apache/lucene/document/LongPoint.java   |  10 +-
 .../org/apache/lucene/document/LongRange.java   |  10 +-
 .../apache/lucene/document/RangeFieldQuery.java |   4 +-
 .../org/apache/lucene/index/CheckIndex.java     |  37 ++--
 .../org/apache/lucene/index/CodecReader.java    |   2 +-
 .../lucene/index/DefaultIndexingChain.java      |  15 +-
 .../java/org/apache/lucene/index/FieldInfo.java |  73 +++++---
 .../org/apache/lucene/index/FieldInfos.java     |  61 ++++---
 .../org/apache/lucene/index/IndexWriter.java    |   6 +-
 .../apache/lucene/index/IndexableFieldType.java |   9 +-
 .../org/apache/lucene/index/PointValues.java    |  11 +-
 .../apache/lucene/index/PointValuesWriter.java  |  20 ++-
 .../apache/lucene/index/SortingLeafReader.java  |   9 +-
 .../apache/lucene/search/PointInSetQuery.java   |   4 +-
 .../apache/lucene/search/PointRangeQuery.java   |   4 +-
 .../org/apache/lucene/util/bkd/BKDReader.java   | 140 ++++++++-------
 .../org/apache/lucene/util/bkd/BKDWriter.java   | 174 ++++++++++---------
 .../apache/lucene/document/TestFieldType.java   |  11 +-
 .../apache/lucene/index/TestIndexableField.java |   7 +-
 .../lucene/index/TestPendingSoftDeletes.java    |  10 +-
 .../apache/lucene/index/TestPointValues.java    |  14 +-
 .../apache/lucene/search/TestPointQueries.java  |   2 +-
 .../apache/lucene/util/TestDocIdSetBuilder.java |   7 +-
 .../apache/lucene/util/bkd/Test2BBKDPoints.java |   4 +-
 .../org/apache/lucene/util/bkd/TestBKD.java     | 142 ++++++++-------
 .../util/bkd/TestMutablePointsReaderUtils.java  |   7 +-
 .../search/highlight/TermVectorLeafReader.java  |   2 +-
 .../join/PointInSetIncludingScoreQuery.java     |   4 +-
 .../apache/lucene/index/memory/MemoryIndex.java |  24 ++-
 .../apache/lucene/document/BigIntegerPoint.java |  10 +-
 .../apache/lucene/document/HalfFloatPoint.java  |  10 +-
 .../lucene/spatial/bbox/BBoxStrategy.java       |   2 +-
 .../spatial/vector/PointVectorStrategy.java     |   2 +-
 .../codecs/asserting/AssertingPointsFormat.java |   4 +-
 .../codecs/cranky/CrankyPointsFormat.java       |  12 +-
 .../lucene/index/AssertingLeafReader.java       |  41 +++--
 .../index/BaseIndexFileFormatTestCase.java      |   2 +-
 .../lucene/index/BasePointsFormatTestCase.java  |  76 +++++---
 .../lucene/index/MismatchedLeafReader.java      |   3 +-
 .../org/apache/lucene/index/RandomCodec.java    |   9 +-
 .../lucene/index/RandomPostingsTester.java      |   4 +-
 .../org/apache/lucene/util/LuceneTestCase.java  |  16 +-
 .../java/org/apache/lucene/util/TestUtil.java   |   2 +-
 .../solr/handler/component/ExpandComponent.java |   3 +-
 .../org/apache/solr/legacy/BBoxStrategy.java    |   2 +-
 .../apache/solr/legacy/PointVectorStrategy.java |   2 +-
 .../org/apache/solr/schema/SchemaField.java     |   7 +-
 .../solr/search/CollapsingQParserPlugin.java    |   2 +-
 .../java/org/apache/solr/search/Insanity.java   |   2 +-
 .../apache/solr/uninverting/FieldCacheImpl.java |   6 +-
 .../solr/uninverting/UninvertingReader.java     |   6 +-
 .../solr/uninverting/TestUninvertingReader.java |   6 +-
 73 files changed, 892 insertions(+), 608 deletions(-)
----------------------------------------------------------------------
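
In user-facing terms, a point field can now encode more dimensions per value than it uses to build the BKD index structure: the index dimensions drive how inner nodes are split, while the remaining data dimensions are only stored at the leaves and compared during visiting. A hedged sketch of what this looks like through FieldType and PointValues, assuming the three-argument setDimensions overload implied by the FieldType change in the diffstat above and using the two dimension-count accessors shown in the SimpleTextBKDReader diff below (exact method names on FieldType are an assumption, not quoted from this commit):

    // Hedged sketch: encode 4 dimensions per value, but build the BKD index
    // structure from only the first 2 (assumed order: dataDims, indexDims, bytesPerDim).
    FieldType type = new FieldType();
    type.setDimensions(4, 2, Integer.BYTES);
    type.freeze();

    // On the read side, PointValues now reports both counts separately.
    PointValues values = leafReader.getPointValues("rect");   // leafReader is an assumed LeafReader
    int dataDims  = values.getNumDataDimensions();            // 4
    int indexDims = values.getNumIndexDimensions();           // 2
    int packedBytesLength = dataDims * values.getBytesPerDimension();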


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index ad83ff9..80d341e 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -192,6 +192,14 @@ Bug fixes:
 * LUCENE-8479: QueryBuilder#analyzeGraphPhrase now throws TooManyClause exception
   if the number of expanded path reaches the BooleanQuery#maxClause limit. (Jim Ferenczi)
 
+New Features
+
+* LUCENE-8496: Selective indexing - modify BKDReader/BKDWriter to allow users
+  to select a fewer number of dimensions to be used for creating the index than
+  the total number of dimensions used for field encoding. i.e., dimensions 0 to N
+  may be used to determine how to split the inner nodes, and dimensions N+1 to D
+  are ignored and stored as data dimensions at the leaves. (Nick Knize)
+
 ======================= Lucene 7.5.1 =======================
 
 Bug Fixes:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
index b7af45a..5227d6d 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
@@ -41,7 +41,8 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
   final private byte[] splitPackedValues; 
   final long[] leafBlockFPs;
   final private int leafNodeOffset;
-  final int numDims;
+  final int numDataDims;
+  final int numIndexDims;
   final int bytesPerDim;
   final int bytesPerIndexEntry;
   final IndexInput in;
@@ -52,16 +53,19 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
   final int docCount;
   final int version;
   protected final int packedBytesLength;
+  protected final int packedIndexBytesLength;
 
-  public SimpleTextBKDReader(IndexInput in, int numDims, int maxPointsInLeafNode, int bytesPerDim, long[] leafBlockFPs, byte[] splitPackedValues,
+  public SimpleTextBKDReader(IndexInput in, int numDataDims, int numIndexDims, int maxPointsInLeafNode, int bytesPerDim, long[] leafBlockFPs, byte[] splitPackedValues,
                              byte[] minPackedValue, byte[] maxPackedValue, long pointCount, int docCount) throws IOException {
     this.in = in;
-    this.numDims = numDims;
+    this.numDataDims = numDataDims;
+    this.numIndexDims = numIndexDims;
     this.maxPointsInLeafNode = maxPointsInLeafNode;
     this.bytesPerDim = bytesPerDim;
     // no version check here because callers of this API (SimpleText) have no back compat:
-    bytesPerIndexEntry = numDims == 1 ? bytesPerDim : bytesPerDim + 1;
-    packedBytesLength = numDims * bytesPerDim;
+    bytesPerIndexEntry = numIndexDims == 1 ? bytesPerDim : bytesPerDim + 1;
+    packedBytesLength = numDataDims * bytesPerDim;
+    packedIndexBytesLength = numIndexDims * bytesPerDim;
     this.leafNodeOffset = leafBlockFPs.length;
     this.leafBlockFPs = leafBlockFPs;
     this.splitPackedValues = splitPackedValues;
@@ -70,8 +74,8 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
     this.pointCount = pointCount;
     this.docCount = docCount;
     this.version = SimpleTextBKDWriter.VERSION_CURRENT;
-    assert minPackedValue.length == packedBytesLength;
-    assert maxPackedValue.length == packedBytesLength;
+    assert minPackedValue.length == packedIndexBytesLength;
+    assert maxPackedValue.length == packedIndexBytesLength;
   }
 
   /** Used to track all state for a single call to {@link #intersect}. */
@@ -115,7 +119,7 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
 
   /** Create a new {@link IntersectState} */
   public IntersectState getIntersectState(IntersectVisitor visitor) {
-    return new IntersectState(in.clone(), numDims,
+    return new IntersectState(in.clone(), numDataDims,
                               packedBytesLength,
                               maxPointsInLeafNode,
                               visitor);
@@ -181,7 +185,7 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
       scratchPackedValue[compressedByteOffset] = in.readByte();
       final int runLen = Byte.toUnsignedInt(in.readByte());
       for (int j = 0; j < runLen; ++j) {
-        for(int dim=0;dim<numDims;dim++) {
+        for(int dim=0;dim<numDataDims;dim++) {
           int prefix = commonPrefixLengths[dim];
           in.readBytes(scratchPackedValue, dim*bytesPerDim + prefix, bytesPerDim - prefix);
         }
@@ -196,14 +200,14 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
 
   private int readCompressedDim(IndexInput in) throws IOException {
     int compressedDim = in.readByte();
-    if (compressedDim < -1 || compressedDim >= numDims) {
+    if (compressedDim < -1 || compressedDim >= numIndexDims) {
       throw new CorruptIndexException("Got compressedDim="+compressedDim, in);
     }
     return compressedDim;
   }
 
   private void readCommonPrefixes(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in) throws IOException {
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numDataDims;dim++) {
       int prefix = in.readVInt();
       commonPrefixLengths[dim] = prefix;
       if (prefix > 0) {
@@ -258,27 +262,27 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
 
       int address = nodeID * bytesPerIndexEntry;
       int splitDim;
-      if (numDims == 1) {
+      if (numIndexDims == 1) {
         splitDim = 0;
       } else {
         splitDim = splitPackedValues[address++] & 0xff;
       }
       
-      assert splitDim < numDims;
+      assert splitDim < numIndexDims;
 
       // TODO: can we alloc & reuse this up front?
 
-      byte[] splitPackedValue = new byte[packedBytesLength];
+      byte[] splitPackedValue = new byte[packedIndexBytesLength];
 
       // Recurse on left sub-tree:
-      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitPackedValues, address, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       intersect(state,
                 2*nodeID,
                 cellMinPacked, splitPackedValue);
 
       // Recurse on right sub-tree:
-      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitPackedValues, address, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       intersect(state,
                 2*nodeID+1,
@@ -307,27 +311,27 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
 
       int address = nodeID * bytesPerIndexEntry;
       int splitDim;
-      if (numDims == 1) {
+      if (numIndexDims == 1) {
         splitDim = 0;
       } else {
         splitDim = splitPackedValues[address++] & 0xff;
       }
       
-      assert splitDim < numDims;
+      assert splitDim < numIndexDims;
 
       // TODO: can we alloc & reuse this up front?
 
-      byte[] splitPackedValue = new byte[packedBytesLength];
+      byte[] splitPackedValue = new byte[packedIndexBytesLength];
 
       // Recurse on left sub-tree:
-      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitPackedValues, address, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       final long leftCost = estimatePointCount(state,
                 2*nodeID,
                 cellMinPacked, splitPackedValue);
 
       // Recurse on right sub-tree:
-      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(splitPackedValues, address, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
       final long rightCost = estimatePointCount(state,
                 2*nodeID+1,
@@ -340,13 +344,13 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
   public void copySplitValue(int nodeID, byte[] splitPackedValue) {
     int address = nodeID * bytesPerIndexEntry;
     int splitDim;
-    if (numDims == 1) {
+    if (numIndexDims == 1) {
       splitDim = 0;
     } else {
       splitDim = splitPackedValues[address++] & 0xff;
     }
     
-    assert splitDim < numDims;
+    assert splitDim < numIndexDims;
     System.arraycopy(splitPackedValues, address, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
   }
 
@@ -367,8 +371,13 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
   }
 
   @Override
-  public int getNumDimensions() {
-    return numDims;
+  public int getNumDataDimensions() {
+    return numDataDims;
+  }
+
+  @Override
+  public int getNumIndexDimensions() {
+    return numIndexDims;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
index 1ddc111..f45209c 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
@@ -62,7 +62,8 @@ import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.INDEX_C
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MAX_LEAF_POINTS;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MAX_VALUE;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MIN_VALUE;
-import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.NUM_DIMS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.NUM_DATA_DIMS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.NUM_INDEX_DIMS;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.POINT_COUNT;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.SPLIT_COUNT;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.SPLIT_DIM;
@@ -102,15 +103,21 @@ final class SimpleTextBKDWriter implements Closeable {
   /** Maximum number of dimensions */
   public static final int MAX_DIMS = 8;
 
-  /** How many dimensions we are indexing */
-  protected final int numDims;
+  /** How many dimensions we are storing at the leaf (data) nodes */
+  protected final int numDataDims;
+
+  /** How many dimensions we are indexing in the internal nodes */
+  protected final int numIndexDims;
 
   /** How many bytes each value in each dimension takes. */
   protected final int bytesPerDim;
 
-  /** numDims * bytesPerDim */
+  /** numDataDims * bytesPerDim */
   protected final int packedBytesLength;
 
+  /** numIndexDims * bytesPerDim */
+  protected final int packedIndexBytesLength;
+
   final BytesRefBuilder scratch = new BytesRefBuilder();
 
   final TrackingDirectoryWrapper tempDir;
@@ -158,37 +165,39 @@ final class SimpleTextBKDWriter implements Closeable {
 
   private final int maxDoc;
 
-  public SimpleTextBKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDims, int bytesPerDim,
+  public SimpleTextBKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDataDims, int numIndexDims, int bytesPerDim,
                              int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount, boolean singleValuePerDoc) throws IOException {
-    this(maxDoc, tempDir, tempFileNamePrefix, numDims, bytesPerDim, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount, singleValuePerDoc,
+    this(maxDoc, tempDir, tempFileNamePrefix, numDataDims, numIndexDims, bytesPerDim, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount, singleValuePerDoc,
          totalPointCount > Integer.MAX_VALUE, Math.max(1, (long) maxMBSortInHeap), OfflineSorter.MAX_TEMPFILES);
   }
 
-  private SimpleTextBKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDims, int bytesPerDim,
+  private SimpleTextBKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDataDims, int numIndexDims, int bytesPerDim,
                               int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount,
                               boolean singleValuePerDoc, boolean longOrds, long offlineSorterBufferMB, int offlineSorterMaxTempFiles) throws IOException {
-    verifyParams(numDims, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount);
+    verifyParams(numDataDims, numIndexDims, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount);
     // We use tracking dir to deal with removing files on exception, so each place that
     // creates temp files doesn't need crazy try/finally/success logic:
     this.tempDir = new TrackingDirectoryWrapper(tempDir);
     this.tempFileNamePrefix = tempFileNamePrefix;
     this.maxPointsInLeafNode = maxPointsInLeafNode;
-    this.numDims = numDims;
+    this.numDataDims = numDataDims;
+    this.numIndexDims = numIndexDims;
     this.bytesPerDim = bytesPerDim;
     this.totalPointCount = totalPointCount;
     this.maxDoc = maxDoc;
     this.offlineSorterBufferMB = OfflineSorter.BufferSize.megabytes(offlineSorterBufferMB);
     this.offlineSorterMaxTempFiles = offlineSorterMaxTempFiles;
     docsSeen = new FixedBitSet(maxDoc);
-    packedBytesLength = numDims * bytesPerDim;
+    packedBytesLength = numDataDims * bytesPerDim;
+    packedIndexBytesLength = numIndexDims * bytesPerDim;
 
     scratchDiff = new byte[bytesPerDim];
     scratch1 = new byte[packedBytesLength];
     scratch2 = new byte[packedBytesLength];
-    commonPrefixLengths = new int[numDims];
+    commonPrefixLengths = new int[numDataDims];
 
-    minPackedValue = new byte[packedBytesLength];
-    maxPackedValue = new byte[packedBytesLength];
+    minPackedValue = new byte[packedIndexBytesLength];
+    maxPackedValue = new byte[packedIndexBytesLength];
 
     // If we may have more than 1+Integer.MAX_VALUE values, then we must encode ords with long (8 bytes), else we can use int (4 bytes).
     this.longOrds = longOrds;
@@ -215,7 +224,7 @@ final class SimpleTextBKDWriter implements Closeable {
     // bytes to points here.  Each dimension has its own sorted partition, so
     // we must divide by numDataDims as well.
 
-    maxPointsSortInHeap = (int) (0.5 * (maxMBSortInHeap * 1024 * 1024) / (bytesPerDoc * numDims));
+    maxPointsSortInHeap = (int) (0.5 * (maxMBSortInHeap * 1024 * 1024) / (bytesPerDoc * numDataDims));
 
     // Finally, we must be able to hold at least the leaf node in heap during build:
     if (maxPointsSortInHeap < maxPointsInLeafNode) {
@@ -228,11 +237,14 @@ final class SimpleTextBKDWriter implements Closeable {
     this.maxMBSortInHeap = maxMBSortInHeap;
   }
 
-  public static void verifyParams(int numDims, int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) {
+  public static void verifyParams(int numDataDims, int numIndexDims, int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) {
     // We encode dim in a single byte in the splitPackedValues, but we only expose 4 bits for it now, in case we want to use
     // remaining 4 bits for another purpose later
-    if (numDims < 1 || numDims > MAX_DIMS) {
-      throw new IllegalArgumentException("numDims must be 1 .. " + MAX_DIMS + " (got: " + numDims + ")");
+    if (numDataDims < 1 || numDataDims > MAX_DIMS) {
+      throw new IllegalArgumentException("numDataDims must be 1 .. " + MAX_DIMS + " (got: " + numDataDims + ")");
+    }
+    if (numIndexDims < 1 || numIndexDims > numDataDims) {
+      throw new IllegalArgumentException("numIndexDims must be 1 .. " + numDataDims + " (got: " + numIndexDims + ")");
     }
     if (maxPointsInLeafNode <= 0) {
       throw new IllegalArgumentException("maxPointsInLeafNode must be > 0; got " + maxPointsInLeafNode);
@@ -281,10 +293,10 @@ final class SimpleTextBKDWriter implements Closeable {
 
     // TODO: we could specialize for the 1D case:
     if (pointCount == 0) {
-      System.arraycopy(packedValue, 0, minPackedValue, 0, packedBytesLength);
-      System.arraycopy(packedValue, 0, maxPackedValue, 0, packedBytesLength);
+      System.arraycopy(packedValue, 0, minPackedValue, 0, packedIndexBytesLength);
+      System.arraycopy(packedValue, 0, maxPackedValue, 0, packedIndexBytesLength);
     } else {
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         int offset = dim*bytesPerDim;
         if (FutureArrays.compareUnsigned(packedValue, offset, offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
           System.arraycopy(packedValue, offset, minPackedValue, offset, bytesPerDim);
@@ -313,7 +325,7 @@ final class SimpleTextBKDWriter implements Closeable {
    *  disk. This method does not use transient disk in order to reorder points.
    */
   public long writeField(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
-    if (numDims == 1) {
+    if (numIndexDims == 1) {
       return writeField1Dim(out, fieldName, reader);
     } else {
       return writeFieldNDims(out, fieldName, reader);
@@ -356,7 +368,7 @@ final class SimpleTextBKDWriter implements Closeable {
     Arrays.fill(maxPackedValue, (byte) 0);
     for (int i = 0; i < Math.toIntExact(pointCount); ++i) {
       values.getValue(i, scratchBytesRef1);
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numIndexDims;dim++) {
         int offset = dim*bytesPerDim;
         if (FutureArrays.compareUnsigned(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, scratchBytesRef1.offset + offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
           System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, minPackedValue, offset, bytesPerDim);
@@ -382,7 +394,7 @@ final class SimpleTextBKDWriter implements Closeable {
   /* In the 1D case, we can simply sort points in ascending order and use the
    * same writing logic as we use at merge time. */
   private long writeField1Dim(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
-    MutablePointsReaderUtils.sort(maxDoc, packedBytesLength, reader, 0, Math.toIntExact(reader.size()));
+    MutablePointsReaderUtils.sort(maxDoc, packedIndexBytesLength, reader, 0, Math.toIntExact(reader.size()));
 
     final OneDimensionBKDWriter oneDimWriter = new OneDimensionBKDWriter(out);
 
@@ -418,8 +430,8 @@ final class SimpleTextBKDWriter implements Closeable {
     int leafCount;
 
     OneDimensionBKDWriter(IndexOutput out) {
-      if (numDims != 1) {
-        throw new UnsupportedOperationException("numDims must be 1 but got " + numDims);
+      if (numIndexDims != 1) {
+        throw new UnsupportedOperationException("numIndexDims must be 1 but got " + numIndexDims);
       }
       if (pointCount != 0) {
         throw new IllegalStateException("cannot mix add and merge");
@@ -497,9 +509,9 @@ final class SimpleTextBKDWriter implements Closeable {
     private void writeLeafBlock() throws IOException {
       assert leafCount != 0;
       if (valueCount == 0) {
-        System.arraycopy(leafValues, 0, minPackedValue, 0, packedBytesLength);
+        System.arraycopy(leafValues, 0, minPackedValue, 0, packedIndexBytesLength);
       }
-      System.arraycopy(leafValues, (leafCount - 1) * packedBytesLength, maxPackedValue, 0, packedBytesLength);
+      System.arraycopy(leafValues, (leafCount - 1) * packedBytesLength, maxPackedValue, 0, packedIndexBytesLength);
 
       valueCount += leafCount;
 
@@ -512,7 +524,7 @@ final class SimpleTextBKDWriter implements Closeable {
 
       Arrays.fill(commonPrefixLengths, bytesPerDim);
       // Find per-dim common prefix:
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         int offset1 = dim * bytesPerDim;
         int offset2 = (leafCount - 1) * packedBytesLength + offset1;
         for(int j=0;j<commonPrefixLengths[dim];j++) {
@@ -658,7 +670,7 @@ final class SimpleTextBKDWriter implements Closeable {
   }
 
   private PointWriter sort(int dim) throws IOException {
-    assert dim >= 0 && dim < numDims;
+    assert dim >= 0 && dim < numDataDims;
 
     if (heapPointWriter != null) {
 
@@ -691,7 +703,7 @@ final class SimpleTextBKDWriter implements Closeable {
       final int offset = bytesPerDim * dim;
 
       Comparator<BytesRef> cmp;
-      if (dim == numDims - 1) {
+      if (dim == numDataDims - 1) {
         // in that case the bytes for the dimension and for the doc id are contiguous,
         // so we don't need a branch
         cmp = new BytesRefComparator(bytesPerDim + Integer.BYTES) {
@@ -776,7 +788,7 @@ final class SimpleTextBKDWriter implements Closeable {
     }
 
     LongBitSet ordBitSet;
-    if (numDims > 1) {
+    if (numDataDims > 1) {
       if (singleValuePerDoc) {
         ordBitSet = new LongBitSet(maxDoc);
       } else {
@@ -811,7 +823,7 @@ final class SimpleTextBKDWriter implements Closeable {
     assert pointCount / numLeaves <= maxPointsInLeafNode: "pointCount=" + pointCount + " numLeaves=" + numLeaves + " maxPointsInLeafNode=" + maxPointsInLeafNode;
 
     // Sort all docs once by each dimension:
-    PathSlice[] sortedPointWriters = new PathSlice[numDims];
+    PathSlice[] sortedPointWriters = new PathSlice[numDataDims];
 
     // This is only used on exception; on normal code paths we close all files we opened:
     List<Closeable> toCloseHeroically = new ArrayList<>();
@@ -819,7 +831,7 @@ final class SimpleTextBKDWriter implements Closeable {
     boolean success = false;
     try {
       //long t0 = System.nanoTime();
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         sortedPointWriters[dim] = new PathSlice(sort(dim), 0, pointCount);
       }
       //long t1 = System.nanoTime();
@@ -867,8 +879,12 @@ final class SimpleTextBKDWriter implements Closeable {
 
   /** Subclass can change how it writes the index. */
   private void writeIndex(IndexOutput out, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
-    write(out, NUM_DIMS);
-    writeInt(out, numDims);
+    write(out, NUM_DATA_DIMS);
+    writeInt(out, numDataDims);
+    newline(out);
+
+    write(out, NUM_INDEX_DIMS);
+    writeInt(out, numIndexDims);
     newline(out);
 
     write(out, BYTES_PER_DIM);
@@ -952,7 +968,7 @@ final class SimpleTextBKDWriter implements Closeable {
       BytesRef ref = packedValues.apply(i);
       assert ref.length == packedBytesLength;
 
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         int prefix = commonPrefixLengths[dim];
         out.writeBytes(ref.bytes, ref.offset + dim*bytesPerDim + prefix, bytesPerDim-prefix);
       }
@@ -1036,7 +1052,7 @@ final class SimpleTextBKDWriter implements Closeable {
       boolean result = reader.next();
       assert result;
       System.arraycopy(reader.packedValue(), splitDim*bytesPerDim, scratch1, 0, bytesPerDim);
-      if (numDims > 1) {
+      if (numDataDims > 1) {
         assert ordBitSet.get(reader.ord()) == false;
         ordBitSet.set(reader.ord());
         // Subtract 1 from rightCount because we already did the first value above (so we could record the split value):
@@ -1051,7 +1067,7 @@ final class SimpleTextBKDWriter implements Closeable {
 
   /** Called only in assert */
   private boolean valueInBounds(BytesRef packedValue, byte[] minPackedValue, byte[] maxPackedValue) {
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numIndexDims;dim++) {
       int offset = bytesPerDim*dim;
       if (FutureArrays.compareUnsigned(packedValue.bytes, packedValue.offset + offset, packedValue.offset + offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
         return false;
@@ -1067,7 +1083,7 @@ final class SimpleTextBKDWriter implements Closeable {
   protected int split(byte[] minPackedValue, byte[] maxPackedValue) {
     // Find which dim has the largest span so we can split on it:
     int splitDim = -1;
-    for(int dim=0;dim<numDims;dim++) {
+    for(int dim=0;dim<numIndexDims;dim++) {
       NumericUtils.subtract(bytesPerDim, dim, maxPackedValue, minPackedValue, scratchDiff);
       if (splitDim == -1 || FutureArrays.compareUnsigned(scratchDiff, 0, bytesPerDim, scratch1, 0, bytesPerDim) > 0) {
         System.arraycopy(scratchDiff, 0, scratch1, 0, bytesPerDim);
@@ -1115,7 +1131,7 @@ final class SimpleTextBKDWriter implements Closeable {
       reader.getValue(from, scratchBytesRef1);
       for (int i = from + 1; i < to; ++i) {
         reader.getValue(i, scratchBytesRef2);
-        for (int dim=0;dim<numDims;dim++) {
+        for (int dim=0;dim<numDataDims;dim++) {
           final int offset = dim * bytesPerDim;
           for(int j=0;j<commonPrefixLengths[dim];j++) {
             if (scratchBytesRef1.bytes[scratchBytesRef1.offset+offset+j] != scratchBytesRef2.bytes[scratchBytesRef2.offset+offset+j]) {
@@ -1127,14 +1143,14 @@ final class SimpleTextBKDWriter implements Closeable {
       }
 
       // Find the dimension that has the least number of unique bytes at commonPrefixLengths[dim]
-      FixedBitSet[] usedBytes = new FixedBitSet[numDims];
-      for (int dim = 0; dim < numDims; ++dim) {
+      FixedBitSet[] usedBytes = new FixedBitSet[numDataDims];
+      for (int dim = 0; dim < numDataDims; ++dim) {
         if (commonPrefixLengths[dim] < bytesPerDim) {
           usedBytes[dim] = new FixedBitSet(256);
         }
       }
       for (int i = from + 1; i < to; ++i) {
-        for (int dim=0;dim<numDims;dim++) {
+        for (int dim=0;dim<numDataDims;dim++) {
           if (usedBytes[dim] != null) {
             byte b = reader.getByteAt(i, dim * bytesPerDim + commonPrefixLengths[dim]);
             usedBytes[dim].set(Byte.toUnsignedInt(b));
@@ -1143,7 +1159,7 @@ final class SimpleTextBKDWriter implements Closeable {
       }
       int sortedDim = 0;
       int sortedDimCardinality = Integer.MAX_VALUE;
-      for (int dim = 0; dim < numDims; ++dim) {
+      for (int dim = 0; dim < numDataDims; ++dim) {
         if (usedBytes[dim] != null) {
           final int cardinality = usedBytes[dim].cardinality();
           if (cardinality < sortedDimCardinality) {
@@ -1206,8 +1222,8 @@ final class SimpleTextBKDWriter implements Closeable {
       reader.getValue(mid, scratchBytesRef1);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, splitPackedValues, address + 1, bytesPerDim);
 
-      byte[] minSplitPackedValue = ArrayUtil.copyOfSubArray(minPackedValue, 0, packedBytesLength);
-      byte[] maxSplitPackedValue = ArrayUtil.copyOfSubArray(maxPackedValue, 0, packedBytesLength);
+      byte[] minSplitPackedValue = ArrayUtil.copyOfSubArray(minPackedValue, 0, packedIndexBytesLength);
+      byte[] maxSplitPackedValue = ArrayUtil.copyOfSubArray(maxPackedValue, 0, packedIndexBytesLength);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
           minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
       System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
@@ -1235,7 +1251,7 @@ final class SimpleTextBKDWriter implements Closeable {
       assert slice.count == slices[0].count;
     }
 
-    if (numDims == 1 && slices[0].writer instanceof OfflinePointWriter && slices[0].count <= maxPointsSortInHeap) {
+    if (numDataDims == 1 && slices[0].writer instanceof OfflinePointWriter && slices[0].count <= maxPointsSortInHeap) {
       // Special case for 1D, to cutover to heap once we recurse deeply enough:
       slices[0] = switchToHeap(slices[0], toCloseHeroically);
     }
@@ -1248,7 +1264,7 @@ final class SimpleTextBKDWriter implements Closeable {
       int sortedDim = 0;
       int sortedDimCardinality = Integer.MAX_VALUE;
 
-      for (int dim=0;dim<numDims;dim++) {
+      for (int dim=0;dim<numDataDims;dim++) {
         if (slices[dim].writer instanceof HeapPointWriter == false) {
           // Adversarial cases can cause this, e.g. very lopsided data, all equal points, such that we started
           // offline, but then kept splitting only in one dimension, and so never had to rewrite into heap writer
@@ -1333,7 +1349,7 @@ final class SimpleTextBKDWriter implements Closeable {
       // Inner node: partition/recurse
 
       int splitDim;
-      if (numDims > 1) {
+      if (numIndexDims > 1) {
         splitDim = split(minPackedValue, maxPackedValue);
       } else {
         splitDim = 0;
@@ -1354,24 +1370,24 @@ final class SimpleTextBKDWriter implements Closeable {
 
       // Partition all PathSlice that are not the split dim into sorted left and right sets, so we can recurse:
 
-      PathSlice[] leftSlices = new PathSlice[numDims];
-      PathSlice[] rightSlices = new PathSlice[numDims];
+      PathSlice[] leftSlices = new PathSlice[numDataDims];
+      PathSlice[] rightSlices = new PathSlice[numDataDims];
 
-      byte[] minSplitPackedValue = new byte[packedBytesLength];
-      System.arraycopy(minPackedValue, 0, minSplitPackedValue, 0, packedBytesLength);
+      byte[] minSplitPackedValue = new byte[packedIndexBytesLength];
+      System.arraycopy(minPackedValue, 0, minSplitPackedValue, 0, packedIndexBytesLength);
 
-      byte[] maxSplitPackedValue = new byte[packedBytesLength];
-      System.arraycopy(maxPackedValue, 0, maxSplitPackedValue, 0, packedBytesLength);
+      byte[] maxSplitPackedValue = new byte[packedIndexBytesLength];
+      System.arraycopy(maxPackedValue, 0, maxSplitPackedValue, 0, packedIndexBytesLength);
 
       // When we are on this dim, below, we clear the ordBitSet:
       int dimToClear;
-      if (numDims - 1 == splitDim) {
-        dimToClear = numDims - 2;
+      if (numDataDims - 1 == splitDim) {
+        dimToClear = numDataDims - 2;
       } else {
-        dimToClear = numDims - 1;
+        dimToClear = numDataDims - 1;
       }
 
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
 
         if (dim == splitDim) {
           // No need to partition on this dim since it's a simple slice of the incoming already sorted slice, and we
@@ -1392,7 +1408,7 @@ final class SimpleTextBKDWriter implements Closeable {
 
           long nextRightCount = reader.split(source.count, ordBitSet, leftPointWriter, rightPointWriter, dim == dimToClear);
           if (rightCount != nextRightCount) {
-            throw new IllegalStateException("wrong number of points in split: expected=" + rightCount + " but actual=" + nextRightCount);
+            throw new IllegalStateException("wrong number of points in split: expected=" + rightCount + " but actual=" + nextRightCount + " in dim " + dim);
           }
 
           leftSlices[dim] = new PathSlice(leftPointWriter, 0, leftCount);
@@ -1407,7 +1423,7 @@ final class SimpleTextBKDWriter implements Closeable {
             ordBitSet, out,
             minPackedValue, maxSplitPackedValue,
             splitPackedValues, leafBlockFPs, toCloseHeroically);
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         // Don't destroy the dim we split on because we just re-used what our caller above gave us for that dim:
         if (dim != splitDim) {
           leftSlices[dim].writer.destroy();
@@ -1420,7 +1436,7 @@ final class SimpleTextBKDWriter implements Closeable {
             ordBitSet, out,
             minSplitPackedValue, maxPackedValue,
             splitPackedValues, leafBlockFPs, toCloseHeroically);
-      for(int dim=0;dim<numDims;dim++) {
+      for(int dim=0;dim<numDataDims;dim++) {
         // Don't destroy the dim we split on because we just re-used what our caller above gave us for that dim:
         if (dim != splitDim) {
           rightSlices[dim].writer.destroy();
@@ -1454,10 +1470,10 @@ final class SimpleTextBKDWriter implements Closeable {
     if (ord > 0) {
       int cmp = FutureArrays.compareUnsigned(lastPackedValue, dimOffset, dimOffset + bytesPerDim, packedValue, packedValueOffset + dimOffset, packedValueOffset + dimOffset + bytesPerDim);
       if (cmp > 0) {
-        throw new AssertionError("values out of order: last value=" + new BytesRef(lastPackedValue) + " current value=" + new BytesRef(packedValue, packedValueOffset, packedBytesLength) + " ord=" + ord);
+        throw new AssertionError("values out of order: last value=" + new BytesRef(lastPackedValue) + " current value=" + new BytesRef(packedValue, packedValueOffset, packedBytesLength) + " ord=" + ord + " sortedDim=" + sortedDim);
       }
       if (cmp == 0 && doc < lastDoc) {
-        throw new AssertionError("docs out of order: last doc=" + lastDoc + " current doc=" + doc + " ord=" + ord);
+        throw new AssertionError("docs out of order: last doc=" + lastDoc + " current doc=" + doc + " ord=" + ord + " sortedDim=" + sortedDim);
       }
     }
     System.arraycopy(packedValue, packedValueOffset, lastPackedValue, 0, packedBytesLength);

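The writer now tracks two packed lengths; a small worked example of the constructor arithmetic above, assuming 4 data dimensions, 2 index dimensions and 8-byte values:

    int numDataDims = 4, numIndexDims = 2, bytesPerDim = Long.BYTES;
    int packedBytesLength = numDataDims * bytesPerDim;       // 32 bytes: the full value written into leaf blocks
    int packedIndexBytesLength = numIndexDims * bytesPerDim; // 16 bytes: the prefix used for min/max, split values and cell bounds
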
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
index 1c40cbd..f52a84a 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosFormat.java
@@ -64,7 +64,8 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
   static final BytesRef NUM_ATTS        =  new BytesRef("  attributes ");
   static final BytesRef ATT_KEY         =  new BytesRef("    key ");
   static final BytesRef ATT_VALUE       =  new BytesRef("    value ");
-  static final BytesRef DIM_COUNT       =  new BytesRef("  dimensional count ");
+  static final BytesRef DATA_DIM_COUNT  =  new BytesRef("  data dimensional count ");
+  static final BytesRef INDEX_DIM_COUNT =  new BytesRef("  index dimensional count ");
   static final BytesRef DIM_NUM_BYTES   =  new BytesRef("  dimensional num bytes ");
   static final BytesRef SOFT_DELETES    =  new BytesRef("  soft-deletes ");
   
@@ -134,8 +135,12 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
         }
 
         SimpleTextUtil.readLine(input, scratch);
-        assert StringHelper.startsWith(scratch.get(), DIM_COUNT);
-        int dimensionalCount = Integer.parseInt(readString(DIM_COUNT.length, scratch));
+        assert StringHelper.startsWith(scratch.get(), DATA_DIM_COUNT);
+        int dataDimensionalCount = Integer.parseInt(readString(DATA_DIM_COUNT.length, scratch));
+
+        SimpleTextUtil.readLine(input, scratch);
+        assert StringHelper.startsWith(scratch.get(), INDEX_DIM_COUNT);
+        int indexDimensionalCount = Integer.parseInt(readString(INDEX_DIM_COUNT.length, scratch));
 
         SimpleTextUtil.readLine(input, scratch);
         assert StringHelper.startsWith(scratch.get(), DIM_NUM_BYTES);
@@ -147,7 +152,7 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
 
         infos[i] = new FieldInfo(name, fieldNumber, storeTermVector, 
                                  omitNorms, storePayloads, indexOptions, docValuesType, dvGen, Collections.unmodifiableMap(atts),
-                                 dimensionalCount, dimensionalNumBytes, isSoftDeletesField);
+                                 dataDimensionalCount, indexDimensionalCount, dimensionalNumBytes, isSoftDeletesField);
       }
 
       SimpleTextUtil.checkFooter(input);
@@ -236,8 +241,12 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
           }
         }
 
-        SimpleTextUtil.write(out, DIM_COUNT);
-        SimpleTextUtil.write(out, Integer.toString(fi.getPointDimensionCount()), scratch);
+        SimpleTextUtil.write(out, DATA_DIM_COUNT);
+        SimpleTextUtil.write(out, Integer.toString(fi.getPointDataDimensionCount()), scratch);
+        SimpleTextUtil.writeNewline(out);
+
+        SimpleTextUtil.write(out, INDEX_DIM_COUNT);
+        SimpleTextUtil.write(out, Integer.toString(fi.getPointIndexDimensionCount()), scratch);
         SimpleTextUtil.writeNewline(out);
         
         SimpleTextUtil.write(out, DIM_NUM_BYTES);

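With the count split, the per-field section written by SimpleTextFieldInfosFormat carries two dimension-count lines instead of one; for example (values illustrative, surrounding lines omitted):

  data dimensional count 4
  index dimensional count 2
  dimensional num bytes 8
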
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java
index 453bd23..728aad1 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java
@@ -47,7 +47,8 @@ import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.INDEX_C
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MAX_LEAF_POINTS;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MAX_VALUE;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MIN_VALUE;
-import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.NUM_DIMS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.NUM_DATA_DIMS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.NUM_INDEX_DIMS;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.POINT_COUNT;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.SPLIT_COUNT;
 import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.SPLIT_DIM;
@@ -101,7 +102,10 @@ class SimpleTextPointsReader extends PointsReader {
     // NOTE: matches what writeIndex does in SimpleTextPointsWriter
     dataIn.seek(fp);
     readLine(dataIn);
-    int numDims = parseInt(NUM_DIMS);
+    int numDataDims = parseInt(NUM_DATA_DIMS);
+
+    readLine(dataIn);
+    int numIndexDims = parseInt(NUM_INDEX_DIMS);
 
     readLine(dataIn);
     int bytesPerDim = parseInt(BYTES_PER_DIM);
@@ -115,12 +119,12 @@ class SimpleTextPointsReader extends PointsReader {
     readLine(dataIn);
     assert startsWith(MIN_VALUE);
     BytesRef minValue = SimpleTextUtil.fromBytesRefString(stripPrefix(MIN_VALUE));
-    assert minValue.length == numDims*bytesPerDim;
+    assert minValue.length == numIndexDims*bytesPerDim;
 
     readLine(dataIn);
     assert startsWith(MAX_VALUE);
     BytesRef maxValue = SimpleTextUtil.fromBytesRefString(stripPrefix(MAX_VALUE));
-    assert maxValue.length == numDims*bytesPerDim;
+    assert maxValue.length == numIndexDims*bytesPerDim;
 
     readLine(dataIn);
     assert startsWith(POINT_COUNT);
@@ -140,7 +144,7 @@ class SimpleTextPointsReader extends PointsReader {
 
     byte[] splitPackedValues;
     int bytesPerIndexEntry;
-    if (numDims == 1) {
+    if (numIndexDims == 1) {
       bytesPerIndexEntry = bytesPerDim;
     } else {
       bytesPerIndexEntry = 1 + bytesPerDim;
@@ -150,7 +154,7 @@ class SimpleTextPointsReader extends PointsReader {
       readLine(dataIn);
       int address = bytesPerIndexEntry * i;
       int splitDim = parseInt(SPLIT_DIM);
-      if (numDims != 1) {
+      if (numIndexDims != 1) {
         splitPackedValues[address++] = (byte) splitDim;
       }
       readLine(dataIn);
@@ -160,7 +164,7 @@ class SimpleTextPointsReader extends PointsReader {
       System.arraycopy(br.bytes, br.offset, splitPackedValues, address, bytesPerDim);
     }
 
-    return new SimpleTextBKDReader(dataIn, numDims, maxPointsInLeafNode, bytesPerDim, leafBlockFPs, splitPackedValues, minValue.bytes, maxValue.bytes, pointCount, docCount);
+    return new SimpleTextBKDReader(dataIn, numDataDims, numIndexDims, maxPointsInLeafNode, bytesPerDim, leafBlockFPs, splitPackedValues, minValue.bytes, maxValue.bytes, pointCount, docCount);
   }
 
   private void readLine(IndexInput in) throws IOException {
@@ -191,7 +195,7 @@ class SimpleTextPointsReader extends PointsReader {
     if (fieldInfo == null) {
       throw new IllegalArgumentException("field=\"" + fieldName + "\" is unrecognized");
     }
-    if (fieldInfo.getPointDimensionCount() == 0) {
+    if (fieldInfo.getPointDataDimensionCount() == 0) {
       throw new IllegalArgumentException("field=\"" + fieldName + "\" did not index points");
     }
     return readers.get(fieldName);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java
index c3217f3..2da74d6 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java
@@ -35,7 +35,8 @@ import org.apache.lucene.util.BytesRefBuilder;
 
 class SimpleTextPointsWriter extends PointsWriter {
 
-  public final static BytesRef NUM_DIMS      = new BytesRef("num dims ");
+  public final static BytesRef NUM_DATA_DIMS  = new BytesRef("num data dims ");
+  public final static BytesRef NUM_INDEX_DIMS = new BytesRef("num index dims ");
   public final static BytesRef BYTES_PER_DIM = new BytesRef("bytes per dim ");
   public final static BytesRef MAX_LEAF_POINTS = new BytesRef("max leaf points ");
   public final static BytesRef INDEX_COUNT = new BytesRef("index count ");
@@ -76,7 +77,8 @@ class SimpleTextPointsWriter extends PointsWriter {
     try (SimpleTextBKDWriter writer = new SimpleTextBKDWriter(writeState.segmentInfo.maxDoc(),
                                                               writeState.directory,
                                                               writeState.segmentInfo.name,
-                                                              fieldInfo.getPointDimensionCount(),
+                                                              fieldInfo.getPointDataDimensionCount(),
+                                                              fieldInfo.getPointIndexDimensionCount(),
                                                               fieldInfo.getPointNumBytes(),
                                                               SimpleTextBKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE,
                                                               SimpleTextBKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
index d9a0b30..dd1eaeb 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
@@ -48,7 +48,7 @@ public abstract class PointsWriter implements Closeable {
       PointsReader pointsReader = mergeState.pointsReaders[i];
       if (pointsReader != null) {
         FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(fieldInfo.name);
-        if (readerFieldInfo != null && readerFieldInfo.getPointDimensionCount() > 0) {
+        if (readerFieldInfo != null && readerFieldInfo.getPointDataDimensionCount() > 0) {
           PointValues values = pointsReader.getValues(fieldInfo.name);
           if (values != null) {
             maxPointCount += values.size();
@@ -92,7 +92,7 @@ public abstract class PointsWriter implements Closeable {
                     continue;
                   }
 
-                  if (readerFieldInfo.getPointDimensionCount() == 0) {
+                  if (readerFieldInfo.getPointDataDimensionCount() == 0) {
                     // This segment saw this field, but the field did not index points in it:
                     continue;
                   }
@@ -143,7 +143,12 @@ public abstract class PointsWriter implements Closeable {
               }
 
               @Override
-              public int getNumDimensions() {
+              public int getNumDataDimensions() {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int getNumIndexDimensions() {
                 throw new UnsupportedOperationException();
               }
 
@@ -182,7 +187,7 @@ public abstract class PointsWriter implements Closeable {
     }
     // merge field at a time
     for (FieldInfo fieldInfo : mergeState.mergeFieldInfos) {
-      if (fieldInfo.getPointDimensionCount() != 0) {
+      if (fieldInfo.getPointDataDimensionCount() != 0) {
         mergeOneField(mergeState, fieldInfo);
       }
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
index 30dca70..0ad0cad 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50FieldInfosFormat.java
@@ -148,7 +148,7 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
           lastAttributes = attributes;
           try {
             infos[i] = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, 
-                                     indexOptions, docValuesType, dvGen, attributes, 0, 0, false);
+                                     indexOptions, docValuesType, dvGen, attributes, 0, 0, 0, false);
             infos[i].checkConsistency();
           } catch (IllegalStateException e) {
             throw new CorruptIndexException("invalid fieldinfo for field: " + name + ", fieldNumber=" + fieldNumber, input, e);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java
index 522a73f..e2ca9ee 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java
@@ -114,7 +114,7 @@ public final class Lucene60FieldInfosFormat extends FieldInfosFormat {
       Throwable priorE = null;
       FieldInfo infos[] = null;
       try {
-        CodecUtil.checkIndexHeader(input,
+        int version = CodecUtil.checkIndexHeader(input,
                                    Lucene60FieldInfosFormat.CODEC_NAME, 
                                    Lucene60FieldInfosFormat.FORMAT_START, 
                                    Lucene60FieldInfosFormat.FORMAT_CURRENT,
@@ -149,9 +149,13 @@ public final class Lucene60FieldInfosFormat extends FieldInfosFormat {
             attributes = lastAttributes;
           }
           lastAttributes = attributes;
-          int pointDimensionCount = input.readVInt();
+          int pointDataDimensionCount = input.readVInt();
           int pointNumBytes;
-          if (pointDimensionCount != 0) {
+          int pointIndexDimensionCount = pointDataDimensionCount;
+          if (pointDataDimensionCount != 0) {
+            if (version >= Lucene60FieldInfosFormat.FORMAT_SELECTIVE_INDEXING) {
+              pointIndexDimensionCount = input.readVInt();
+            }
             pointNumBytes = input.readVInt();
           } else {
             pointNumBytes = 0;
@@ -160,7 +164,7 @@ public final class Lucene60FieldInfosFormat extends FieldInfosFormat {
           try {
             infos[i] = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, 
                                      indexOptions, docValuesType, dvGen, attributes,
-                                     pointDimensionCount, pointNumBytes, isSoftDeletesField);
+                                     pointDataDimensionCount, pointIndexDimensionCount, pointNumBytes, isSoftDeletesField);
             infos[i].checkConsistency();
           } catch (IllegalStateException e) {
             throw new CorruptIndexException("invalid fieldinfo for field: " + name + ", fieldNumber=" + fieldNumber, input, e);
@@ -287,9 +291,9 @@ public final class Lucene60FieldInfosFormat extends FieldInfosFormat {
         output.writeByte(docValuesByte(fi.getDocValuesType()));
         output.writeLong(fi.getDocValuesGen());
         output.writeMapOfStrings(fi.attributes());
-        int pointDimensionCount = fi.getPointDimensionCount();
-        output.writeVInt(pointDimensionCount);
-        if (pointDimensionCount != 0) {
+        output.writeVInt(fi.getPointDataDimensionCount());
+        if (fi.getPointDataDimensionCount() != 0) {
+          output.writeVInt(fi.getPointIndexDimensionCount());
           output.writeVInt(fi.getPointNumBytes());
         }
       }
@@ -304,7 +308,8 @@ public final class Lucene60FieldInfosFormat extends FieldInfosFormat {
   static final String CODEC_NAME = "Lucene60FieldInfos";
   static final int FORMAT_START = 0;
   static final int FORMAT_SOFT_DELETES = 1;
-  static final int FORMAT_CURRENT = FORMAT_SOFT_DELETES;
+  static final int FORMAT_SELECTIVE_INDEXING = 2;
+  static final int FORMAT_CURRENT = FORMAT_SELECTIVE_INDEXING;
   
   // Field flags
   static final byte STORE_TERMVECTOR = 0x1;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsReader.java
index 0eecdbb..b5ff680 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsReader.java
@@ -124,7 +124,7 @@ public class Lucene60PointsReader extends PointsReader implements Closeable {
     if (fieldInfo == null) {
       throw new IllegalArgumentException("field=\"" + fieldName + "\" is unrecognized");
     }
-    if (fieldInfo.getPointDimensionCount() == 0) {
+    if (fieldInfo.getPointDataDimensionCount() == 0) {
       throw new IllegalArgumentException("field=\"" + fieldName + "\" did not index point values");
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java
index 4f51e26..fddf08c 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java
@@ -94,7 +94,8 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {
     try (BKDWriter writer = new BKDWriter(writeState.segmentInfo.maxDoc(),
                                           writeState.directory,
                                           writeState.segmentInfo.name,
-                                          fieldInfo.getPointDimensionCount(),
+                                          fieldInfo.getPointDataDimensionCount(),
+                                          fieldInfo.getPointIndexDimensionCount(),
                                           fieldInfo.getPointNumBytes(),
                                           maxPointsInLeafNode,
                                           maxMBSortInHeap,
@@ -152,8 +153,8 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {
     }
 
     for (FieldInfo fieldInfo : mergeState.mergeFieldInfos) {
-      if (fieldInfo.getPointDimensionCount() != 0) {
-        if (fieldInfo.getPointDimensionCount() == 1) {
+      if (fieldInfo.getPointDataDimensionCount() != 0) {
+        if (fieldInfo.getPointDataDimensionCount() == 1) {
 
           boolean singleValuePerDoc = true;
 
@@ -164,7 +165,7 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {
             if (reader != null) {
               FieldInfos readerFieldInfos = mergeState.fieldInfos[i];
               FieldInfo readerFieldInfo = readerFieldInfos.fieldInfo(fieldInfo.name);
-              if (readerFieldInfo != null && readerFieldInfo.getPointDimensionCount() > 0) {
+              if (readerFieldInfo != null && readerFieldInfo.getPointDataDimensionCount() > 0) {
                 PointValues values = reader.getValues(fieldInfo.name);
                 if (values != null) {
                   totMaxSize += values.size();
@@ -181,7 +182,8 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {
           try (BKDWriter writer = new BKDWriter(writeState.segmentInfo.maxDoc(),
                                                 writeState.directory,
                                                 writeState.segmentInfo.name,
-                                                fieldInfo.getPointDimensionCount(),
+                                                fieldInfo.getPointDataDimensionCount(),
+                                                fieldInfo.getPointIndexDimensionCount(),
                                                 fieldInfo.getPointNumBytes(),
                                                 maxPointsInLeafNode,
                                                 maxMBSortInHeap,
@@ -204,7 +206,7 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {
 
                 FieldInfos readerFieldInfos = mergeState.fieldInfos[i];
                 FieldInfo readerFieldInfo = readerFieldInfos.fieldInfo(fieldInfo.name);
-                if (readerFieldInfo != null && readerFieldInfo.getPointDimensionCount() > 0) {
+                if (readerFieldInfo != null && readerFieldInfo.getPointDataDimensionCount() > 0) {
                   BKDReader bkdReader = reader60.readers.get(readerFieldInfo.number);
                   if (bkdReader != null) {
                     bkdReaders.add(bkdReader);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java
index 991eedf..291a384 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java
@@ -134,7 +134,7 @@ final class PerFieldMergeState {
           hasNorms |= fi.hasNorms();
           hasDocValues |= fi.getDocValuesType() != DocValuesType.NONE;
           hasPayloads |= fi.hasPayloads();
-          hasPointValues |= (fi.getPointDimensionCount() != 0);
+          hasPointValues |= (fi.getPointDataDimensionCount() != 0);
         }
       }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/BinaryPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/BinaryPoint.java b/lucene/core/src/java/org/apache/lucene/document/BinaryPoint.java
index 693a00f..f4b625e 100644
--- a/lucene/core/src/java/org/apache/lucene/document/BinaryPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/BinaryPoint.java
@@ -124,8 +124,8 @@ public final class BinaryPoint extends Field {
   /** Expert API */
   public BinaryPoint(String name, byte[] packedPoint, IndexableFieldType type) {
     super(name, packedPoint, type);
-    if (packedPoint.length != type.pointDimensionCount() * type.pointNumBytes()) {
-      throw new IllegalArgumentException("packedPoint is length=" + packedPoint.length + " but type.pointDimensionCount()=" + type.pointDimensionCount() + " and type.pointNumBytes()=" + type.pointNumBytes());
+    if (packedPoint.length != type.pointDataDimensionCount() * type.pointNumBytes()) {
+      throw new IllegalArgumentException("packedPoint is length=" + packedPoint.length + " but type.pointDimensionCount()=" + type.pointDataDimensionCount() + " and type.pointNumBytes()=" + type.pointNumBytes());
     }
   }
   

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java b/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
index 6547402..e442bec 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DoublePoint.java
@@ -85,8 +85,8 @@ public final class DoublePoint extends Field {
 
   /** Change the values of this field */
   public void setDoubleValues(double... point) {
-    if (type.pointDimensionCount() != point.length) {
-      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
+    if (type.pointDataDimensionCount() != point.length) {
+      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
     }
     fieldsData = pack(point);
   }
@@ -98,8 +98,8 @@ public final class DoublePoint extends Field {
 
   @Override
   public Number numericValue() {
-    if (type.pointDimensionCount() != 1) {
-      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
+    if (type.pointDataDimensionCount() != 1) {
+      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
     assert bytes.length == Double.BYTES;
@@ -142,7 +142,7 @@ public final class DoublePoint extends Field {
     result.append(':');
 
     BytesRef bytes = (BytesRef) fieldsData;
-    for (int dim = 0; dim < type.pointDimensionCount(); dim++) {
+    for (int dim = 0; dim < type.pointDataDimensionCount(); dim++) {
       if (dim > 0) {
         result.append(',');
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/DoubleRange.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/DoubleRange.java b/lucene/core/src/java/org/apache/lucene/document/DoubleRange.java
index cf308c3..726d0a1 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DoubleRange.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DoubleRange.java
@@ -77,8 +77,8 @@ public class DoubleRange extends Field {
    */
   public void setRangeValues(double[] min, double[] max) {
     checkArgs(min, max);
-    if (min.length*2 != type.pointDimensionCount() || max.length*2 != type.pointDimensionCount()) {
-      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDimensionCount()/2
+    if (min.length*2 != type.pointDataDimensionCount() || max.length*2 != type.pointDataDimensionCount()) {
+      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDataDimensionCount()/2
           + " dimensions; cannot change to (incoming) " + min.length + " dimensions");
     }
 
@@ -148,7 +148,7 @@ public class DoubleRange extends Field {
    * @return the decoded min value
    */
   public double getMin(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMin(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -158,7 +158,7 @@ public class DoubleRange extends Field {
    * @return the decoded max value
    */
   public double getMax(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMax(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -244,7 +244,7 @@ public class DoubleRange extends Field {
     sb.append(':');
     byte[] b = ((BytesRef)fieldsData).bytes;
     toString(b, 0);
-    for (int d = 0; d < type.pointDimensionCount() / 2; ++d) {
+    for (int d = 0; d < type.pointDataDimensionCount() / 2; ++d) {
       sb.append(' ');
       sb.append(toString(b, d));
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/FieldType.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/FieldType.java b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
index 3c7d276..a21572e 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
@@ -38,7 +38,8 @@ public class FieldType implements IndexableFieldType  {
   private IndexOptions indexOptions = IndexOptions.NONE;
   private boolean frozen;
   private DocValuesType docValuesType = DocValuesType.NONE;
-  private int dimensionCount;
+  private int dataDimensionCount;
+  private int indexDimensionCount;
   private int dimensionNumBytes;
 
   /**
@@ -54,7 +55,8 @@ public class FieldType implements IndexableFieldType  {
     this.omitNorms = ref.omitNorms();
     this.indexOptions = ref.indexOptions();
     this.docValuesType = ref.docValuesType();
-    this.dimensionCount = ref.pointDimensionCount();
+    this.dataDimensionCount = ref.pointDataDimensionCount();
+    this.indexDimensionCount = ref.pointIndexDimensionCount();
     this.dimensionNumBytes = ref.pointNumBytes();
     // Do not copy frozen!
   }
@@ -279,11 +281,24 @@ public class FieldType implements IndexableFieldType  {
    * Enables points indexing.
    */
   public void setDimensions(int dimensionCount, int dimensionNumBytes) {
-    if (dimensionCount < 0) {
-      throw new IllegalArgumentException("dimensionCount must be >= 0; got " + dimensionCount);
+    this.setDimensions(dimensionCount, dimensionCount, dimensionNumBytes);
+  }
+
+  /**
+   * Enables points indexing with selectable dimension indexing.
+   */
+  public void setDimensions(int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
+    if (dataDimensionCount < 0) {
+      throw new IllegalArgumentException("dataDimensionCount must be >= 0; got " + dataDimensionCount);
+    }
+    if (dataDimensionCount > PointValues.MAX_DIMENSIONS) {
+      throw new IllegalArgumentException("dataDimensionCount must be <= " + PointValues.MAX_DIMENSIONS + "; got " + dataDimensionCount);
     }
-    if (dimensionCount > PointValues.MAX_DIMENSIONS) {
-      throw new IllegalArgumentException("dimensionCount must be <= " + PointValues.MAX_DIMENSIONS + "; got " + dimensionCount);
+    if (indexDimensionCount < 0) {
+      throw new IllegalArgumentException("indexDimensionCount must be >= 0; got " + indexDimensionCount);
+    }
+    if (indexDimensionCount > dataDimensionCount) {
+      throw new IllegalArgumentException("indexDimensionCount must be <= dataDimensionCount: " + dataDimensionCount + "; got " + indexDimensionCount);
     }
     if (dimensionNumBytes < 0) {
       throw new IllegalArgumentException("dimensionNumBytes must be >= 0; got " + dimensionNumBytes);
@@ -291,23 +306,34 @@ public class FieldType implements IndexableFieldType  {
     if (dimensionNumBytes > PointValues.MAX_NUM_BYTES) {
       throw new IllegalArgumentException("dimensionNumBytes must be <= " + PointValues.MAX_NUM_BYTES + "; got " + dimensionNumBytes);
     }
-    if (dimensionCount == 0) {
+    if (dataDimensionCount == 0) {
+      if (indexDimensionCount != 0) {
+        throw new IllegalArgumentException("when dataDimensionCount is 0, indexDimensionCount must be 0; got " + indexDimensionCount);
+      }
       if (dimensionNumBytes != 0) {
-        throw new IllegalArgumentException("when dimensionCount is 0, dimensionNumBytes must 0; got " + dimensionNumBytes);
+        throw new IllegalArgumentException("when dataDimensionCount is 0, dimensionNumBytes must be 0; got " + dimensionNumBytes);
       }
+    } else if (indexDimensionCount == 0) {
+      throw new IllegalArgumentException("when dataDimensionCount is > 0, indexDimensionCount must be > 0; got " + indexDimensionCount);
     } else if (dimensionNumBytes == 0) {
-      if (dimensionCount != 0) {
-        throw new IllegalArgumentException("when dimensionNumBytes is 0, dimensionCount must 0; got " + dimensionCount);
+      if (dataDimensionCount != 0) {
+        throw new IllegalArgumentException("when dimensionNumBytes is 0, dataDimensionCount must be 0; got " + dataDimensionCount);
       }
     }
 
-    this.dimensionCount = dimensionCount;
+    this.dataDimensionCount = dataDimensionCount;
+    this.indexDimensionCount = indexDimensionCount;
     this.dimensionNumBytes = dimensionNumBytes;
   }
 
   @Override
-  public int pointDimensionCount() {
-    return dimensionCount;
+  public int pointDataDimensionCount() {
+    return dataDimensionCount;
+  }
+
+  @Override
+  public int pointIndexDimensionCount() {
+    return indexDimensionCount;
   }
 
   @Override
@@ -349,12 +375,14 @@ public class FieldType implements IndexableFieldType  {
         result.append(indexOptions);
       }
     }
-    if (dimensionCount != 0) {
+    if (dataDimensionCount != 0) {
       if (result.length() > 0) {
         result.append(",");
       }
-      result.append("pointDimensionCount=");
-      result.append(dimensionCount);
+      result.append("pointDataDimensionCount=");
+      result.append(dataDimensionCount);
+      result.append(",pointIndexDimensionCount=");
+      result.append(indexDimensionCount);
       result.append(",pointNumBytes=");
       result.append(dimensionNumBytes);
     }
@@ -399,7 +427,8 @@ public class FieldType implements IndexableFieldType  {
   public int hashCode() {
     final int prime = 31;
     int result = 1;
-    result = prime * result + dimensionCount;
+    result = prime * result + dataDimensionCount;
+    result = prime * result + indexDimensionCount;
     result = prime * result + dimensionNumBytes;
     result = prime * result + ((docValuesType == null) ? 0 : docValuesType.hashCode());
     result = prime * result + indexOptions.hashCode();
@@ -419,7 +448,8 @@ public class FieldType implements IndexableFieldType  {
     if (obj == null) return false;
     if (getClass() != obj.getClass()) return false;
     FieldType other = (FieldType) obj;
-    if (dimensionCount != other.dimensionCount) return false;
+    if (dataDimensionCount != other.dataDimensionCount) return false;
+    if (indexDimensionCount != other.indexDimensionCount) return false;
     if (dimensionNumBytes != other.dimensionNumBytes) return false;
     if (docValuesType != other.docValuesType) return false;
     if (indexOptions != other.indexOptions) return false;
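
A minimal, hypothetical sketch of how the two setDimensions overloads relate after the FieldType patch above. The variable names are illustrative only; the accessors pointDataDimensionCount() and pointIndexDimensionCount() are the ones introduced in this diff, and the reading that index dimensions are the prefix of the data dimensions actually indexed is inferred from the patch, not stated by the commit message.

    import org.apache.lucene.document.FieldType;

    public class DimensionConfigSketch {
      public static void main(String[] args) {
        FieldType legacy = new FieldType();
        // Old two-argument form: per the delegation added above, data and index
        // dimension counts stay equal (4 and 4).
        legacy.setDimensions(4, Long.BYTES);

        FieldType selective = new FieldType();
        // New three-argument form: keep 4 data dimensions per point, but index
        // only the first 2 of them.
        selective.setDimensions(4, 2, Long.BYTES);

        System.out.println(legacy.pointDataDimensionCount());     // 4
        System.out.println(legacy.pointIndexDimensionCount());    // 4
        System.out.println(selective.pointDataDimensionCount());  // 4
        System.out.println(selective.pointIndexDimensionCount()); // 2
      }
    }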

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java b/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
index 0ec67fd..b6d508f 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FloatPoint.java
@@ -85,8 +85,8 @@ public final class FloatPoint extends Field {
 
   /** Change the values of this field */
   public void setFloatValues(float... point) {
-    if (type.pointDimensionCount() != point.length) {
-      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
+    if (type.pointDataDimensionCount() != point.length) {
+      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
     }
     fieldsData = pack(point);
   }
@@ -98,8 +98,8 @@ public final class FloatPoint extends Field {
 
   @Override
   public Number numericValue() {
-    if (type.pointDimensionCount() != 1) {
-      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
+    if (type.pointDataDimensionCount() != 1) {
+      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
     assert bytes.length == Float.BYTES;
@@ -142,7 +142,7 @@ public final class FloatPoint extends Field {
     result.append(':');
 
     BytesRef bytes = (BytesRef) fieldsData;
-    for (int dim = 0; dim < type.pointDimensionCount(); dim++) {
+    for (int dim = 0; dim < type.pointDataDimensionCount(); dim++) {
       if (dim > 0) {
         result.append(',');
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/FloatRange.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/FloatRange.java b/lucene/core/src/java/org/apache/lucene/document/FloatRange.java
index 9b555d6..191631b 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FloatRange.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FloatRange.java
@@ -77,8 +77,8 @@ public class FloatRange extends Field {
    */
   public void setRangeValues(float[] min, float[] max) {
     checkArgs(min, max);
-    if (min.length*2 != type.pointDimensionCount() || max.length*2 != type.pointDimensionCount()) {
-      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDimensionCount()/2
+    if (min.length*2 != type.pointDataDimensionCount() || max.length*2 != type.pointDataDimensionCount()) {
+      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDataDimensionCount()/2
           + " dimensions; cannot change to (incoming) " + min.length + " dimensions");
     }
 
@@ -148,7 +148,7 @@ public class FloatRange extends Field {
    * @return the decoded min value
    */
   public float getMin(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMin(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -158,7 +158,7 @@ public class FloatRange extends Field {
    * @return the decoded max value
    */
   public float getMax(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMax(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -244,7 +244,7 @@ public class FloatRange extends Field {
     sb.append(':');
     byte[] b = ((BytesRef)fieldsData).bytes;
     toString(b, 0);
-    for (int d = 0; d < type.pointDimensionCount() / 2; ++d) {
+    for (int d = 0; d < type.pointDataDimensionCount() / 2; ++d) {
       sb.append(' ');
       sb.append(toString(b, d));
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/IntPoint.java b/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
index 8b3484b..da4f391 100644
--- a/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/IntPoint.java
@@ -59,8 +59,8 @@ public final class IntPoint extends Field {
 
   /** Change the values of this field */
   public void setIntValues(int... point) {
-    if (type.pointDimensionCount() != point.length) {
-      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
+    if (type.pointDataDimensionCount() != point.length) {
+      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
     }
     fieldsData = pack(point);
   }
@@ -72,8 +72,8 @@ public final class IntPoint extends Field {
 
   @Override
   public Number numericValue() {
-    if (type.pointDimensionCount() != 1) {
-      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
+    if (type.pointDataDimensionCount() != 1) {
+      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
     assert bytes.length == Integer.BYTES;
@@ -116,7 +116,7 @@ public final class IntPoint extends Field {
     result.append(':');
 
     BytesRef bytes = (BytesRef) fieldsData;
-    for (int dim = 0; dim < type.pointDimensionCount(); dim++) {
+    for (int dim = 0; dim < type.pointDataDimensionCount(); dim++) {
       if (dim > 0) {
         result.append(',');
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/IntRange.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/IntRange.java b/lucene/core/src/java/org/apache/lucene/document/IntRange.java
index e67b94f..b24e80a 100644
--- a/lucene/core/src/java/org/apache/lucene/document/IntRange.java
+++ b/lucene/core/src/java/org/apache/lucene/document/IntRange.java
@@ -77,8 +77,8 @@ public class IntRange extends Field {
    */
   public void setRangeValues(int[] min, int[] max) {
     checkArgs(min, max);
-    if (min.length*2 != type.pointDimensionCount() || max.length*2 != type.pointDimensionCount()) {
-      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDimensionCount()/2
+    if (min.length*2 != type.pointDataDimensionCount() || max.length*2 != type.pointDataDimensionCount()) {
+      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDataDimensionCount()/2
           + " dimensions; cannot change to (incoming) " + min.length + " dimensions");
     }
 
@@ -148,7 +148,7 @@ public class IntRange extends Field {
    * @return the decoded min value
    */
   public int getMin(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMin(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -158,7 +158,7 @@ public class IntRange extends Field {
    * @return the decoded max value
    */
   public int getMax(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMax(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -244,7 +244,7 @@ public class IntRange extends Field {
     sb.append(':');
     byte[] b = ((BytesRef)fieldsData).bytes;
     toString(b, 0);
-    for (int d = 0; d < type.pointDimensionCount() / 2; ++d) {
+    for (int d = 0; d < type.pointDataDimensionCount() / 2; ++d) {
       sb.append(' ');
       sb.append(toString(b, d));
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java b/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java
index e3edead..c3ecc8a 100644
--- a/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/LatLonPoint.java
@@ -147,9 +147,9 @@ public class LatLonPoint extends Field {
   /** helper: checks a fieldinfo and throws exception if its definitely not a LatLonPoint */
   static void checkCompatible(FieldInfo fieldInfo) {
     // point/dv properties could be "unset", if you e.g. used only StoredField with this same name in the segment.
-    if (fieldInfo.getPointDimensionCount() != 0 && fieldInfo.getPointDimensionCount() != TYPE.pointDimensionCount()) {
-      throw new IllegalArgumentException("field=\"" + fieldInfo.name + "\" was indexed with numDims=" + fieldInfo.getPointDimensionCount() + 
-                                         " but this point type has numDims=" + TYPE.pointDimensionCount() + 
+    if (fieldInfo.getPointDataDimensionCount() != 0 && fieldInfo.getPointDataDimensionCount() != TYPE.pointDataDimensionCount()) {
+      throw new IllegalArgumentException("field=\"" + fieldInfo.name + "\" was indexed with numDims=" + fieldInfo.getPointDataDimensionCount() +
+          " but this point type has numDims=" + TYPE.pointDataDimensionCount() +
                                          ", is the field really a LatLonPoint?");
     }
     if (fieldInfo.getPointNumBytes() != 0 && fieldInfo.getPointNumBytes() != TYPE.pointNumBytes()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LongPoint.java b/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
index 5311114..835a37a 100644
--- a/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
+++ b/lucene/core/src/java/org/apache/lucene/document/LongPoint.java
@@ -62,8 +62,8 @@ public final class LongPoint extends Field {
 
   /** Change the values of this field */
   public void setLongValues(long... point) {
-    if (type.pointDimensionCount() != point.length) {
-      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
+    if (type.pointDataDimensionCount() != point.length) {
+      throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
     }
     fieldsData = pack(point);
   }
@@ -75,8 +75,8 @@ public final class LongPoint extends Field {
 
   @Override
   public Number numericValue() {
-    if (type.pointDimensionCount() != 1) {
-      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
+    if (type.pointDataDimensionCount() != 1) {
+      throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDataDimensionCount() + " dimensions; cannot convert to a single numeric value");
     }
     BytesRef bytes = (BytesRef) fieldsData;
     assert bytes.length == Long.BYTES;
@@ -119,7 +119,7 @@ public final class LongPoint extends Field {
     result.append(':');
 
     BytesRef bytes = (BytesRef) fieldsData;
-    for (int dim = 0; dim < type.pointDimensionCount(); dim++) {
+    for (int dim = 0; dim < type.pointDataDimensionCount(); dim++) {
       if (dim > 0) {
         result.append(',');
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1118299c/lucene/core/src/java/org/apache/lucene/document/LongRange.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LongRange.java b/lucene/core/src/java/org/apache/lucene/document/LongRange.java
index 1a1b19a..9056d2d 100644
--- a/lucene/core/src/java/org/apache/lucene/document/LongRange.java
+++ b/lucene/core/src/java/org/apache/lucene/document/LongRange.java
@@ -77,8 +77,8 @@ public class LongRange extends Field {
    */
   public void setRangeValues(long[] min, long[] max) {
     checkArgs(min, max);
-    if (min.length*2 != type.pointDimensionCount() || max.length*2 != type.pointDimensionCount()) {
-      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDimensionCount()/2
+    if (min.length*2 != type.pointDataDimensionCount() || max.length*2 != type.pointDataDimensionCount()) {
+      throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDataDimensionCount()/2
           + " dimensions; cannot change to (incoming) " + min.length + " dimensions");
     }
 
@@ -146,7 +146,7 @@ public class LongRange extends Field {
    * @return the decoded min value
    */
   public long getMin(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMin(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -156,7 +156,7 @@ public class LongRange extends Field {
    * @return the decoded max value
    */
   public long getMax(int dimension) {
-    FutureObjects.checkIndex(dimension, type.pointDimensionCount()/2);
+    FutureObjects.checkIndex(dimension, type.pointDataDimensionCount()/2);
     return decodeMax(((BytesRef)fieldsData).bytes, dimension);
   }
 
@@ -242,7 +242,7 @@ public class LongRange extends Field {
     sb.append(':');
     byte[] b = ((BytesRef)fieldsData).bytes;
     toString(b, 0);
-    for (int d = 0; d < type.pointDimensionCount() / 2; ++d) {
+    for (int d = 0; d < type.pointDataDimensionCount() / 2; ++d) {
       sb.append(' ');
       sb.append(toString(b, d));
     }
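
A short, assumed example of why the range classes above divide pointDataDimensionCount() by 2: each logical range dimension stores a min and a max, so a 2-dimensional LongRange is backed by 4 data dimensions. The field name and values below are hypothetical and only illustrate the accessors touched in this diff.

    import org.apache.lucene.document.LongRange;

    public class RangeDimensionSketch {
      public static void main(String[] args) {
        LongRange box = new LongRange("box",
            new long[] {0L, 0L},     // min for each of the 2 logical dimensions
            new long[] {10L, 20L});  // max for each of the 2 logical dimensions

        // 2 logical dimensions are packed as 4 point data dimensions.
        System.out.println(box.fieldType().pointDataDimensionCount()); // 4
        System.out.println(box.getMin(0)); // 0
        System.out.println(box.getMax(1)); // 20
      }
    }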


[14/50] [abbrv] lucene-solr:jira/http2: SOLR-11812: fix precommit

Posted by da...@apache.org.
SOLR-11812: fix precommit


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/50478ea7
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/50478ea7
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/50478ea7

Branch: refs/heads/jira/http2
Commit: 50478ea72d2645b4c4a0a90f2757d467278e3eeb
Parents: 23e22e6
Author: Steve Rowe <sa...@apache.org>
Authored: Tue Oct 9 17:27:21 2018 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Tue Oct 9 17:27:21 2018 -0400

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/cloud/ElectionContext.java | 2 --
 solr/core/src/java/org/apache/solr/cloud/ZkController.java    | 2 --
 2 files changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/50478ea7/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
index a698f83..d4f84f9 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
@@ -21,9 +21,7 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.EnumSet;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/50478ea7/solr/core/src/java/org/apache/solr/cloud/ZkController.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index be5a3f4..5caad81 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -110,7 +110,6 @@ import org.apache.solr.util.RTimer;
 import org.apache.solr.util.RefCounted;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.ConnectionLossException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.KeeperException.SessionExpiredException;
 import org.apache.zookeeper.Op;
@@ -119,7 +118,6 @@ import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
 
 import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;


[44/50] [abbrv] lucene-solr:jira/http2: SOLR-12792: extract test data into separate files in autoscaling tests

Posted by da...@apache.org.
SOLR-12792: extract test data into separate files in autoscaling tests


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f1a30bfb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f1a30bfb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f1a30bfb

Branch: refs/heads/jira/http2
Commit: f1a30bfb00cc3f72ee30a3356c153aee3a402433
Parents: aa0a528
Author: Noble Paul <no...@apache.org>
Authored: Tue Oct 16 19:11:08 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Tue Oct 16 19:11:08 2018 +1100

----------------------------------------------------------------------
 ...tAutoscalingPreferencesUsedWithNoPolicy.json |  53 ++
 .../testComputePlanAfterNodeAdded.json          |  16 +
 .../solr/autoscaling/testCoresSuggestions.json  |  17 +
 .../solr/autoscaling/testDiskSpaceHint.json     |  16 +
 .../solr/autoscaling/testEqualOnNonNode.json    |  83 +++
 .../autoscaling/testFreeDiskSuggestions.json    |  27 +
 .../autoscaling/testFreediskPercentage.json     |  25 +
 .../autoscaling/testMoveReplicaSuggester.json   |  15 +
 .../solrj/solr/autoscaling/testPolicy.json      |  41 ++
 .../solr/autoscaling/testPortSuggestions.json   |  22 +
 .../testReplicaCountSuggestions.json            |  15 +
 .../solr/autoscaling/testReplicaPercentage.json |  46 ++
 .../autoscaling/testReplicaZonesPercentage.json |  15 +
 .../autoscaling/testSyspropSuggestions1.json    |  24 +
 .../solr/autoscaling/testViolationOutput.json   |  22 +
 .../solr/autoscaling/testWithCollection.json    |  21 +
 .../testWithCollectionMoveReplica.json          |  28 +
 .../testWithCollectionMoveVsAddSuggestions.json |  49 ++
 .../testWithCollectionSuggestions.json          |  21 +
 .../solrj/cloud/autoscaling/TestPolicy.java     | 722 ++-----------------
 .../solrj/cloud/autoscaling/TestPolicy2.java    |  98 +--
 21 files changed, 630 insertions(+), 746 deletions(-)
----------------------------------------------------------------------
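
A hedged sketch of what loading one of these new JSON resources from the test classpath could look like. The resource path mirrors the test-files layout in the stat list above, and the use of org.apache.solr.common.util.Utils.fromJSON(InputStream) is an assumption for illustration; this is not the actual TestPolicy2.loadFromResource implementation from the commit.

    import java.io.InputStream;
    import java.util.Map;
    import org.apache.solr.common.util.Utils;

    public class TestDataLoadingSketch {
      @SuppressWarnings("unchecked")
      static Map<String, Object> loadTestState(String resource) throws Exception {
        // Resolve the JSON file from the test classpath and parse it into a Map.
        try (InputStream in =
                 TestDataLoadingSketch.class.getClassLoader().getResourceAsStream(resource)) {
          if (in == null) {
            throw new IllegalArgumentException("test resource not found: " + resource);
          }
          return (Map<String, Object>) Utils.fromJSON(in);
        }
      }

      public static void main(String[] args) throws Exception {
        Map<String, Object> state = loadTestState("solrj/solr/autoscaling/testPolicy.json");
        System.out.println(state.keySet()); // e.g. [gettingstarted]
      }
    }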


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoscalingPreferencesUsedWithNoPolicy.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoscalingPreferencesUsedWithNoPolicy.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoscalingPreferencesUsedWithNoPolicy.json
new file mode 100644
index 0000000..a4c39d4
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testAutoscalingPreferencesUsedWithNoPolicy.json
@@ -0,0 +1,53 @@
+{
+  "liveNodes":["node1:8983",
+    "node2:8984",
+    "node3:8985"],
+  "replicaInfo":{"node1:8983":{"c1":{
+    "s1":[{"r1":{
+      "type":"NRT",
+      "INDEX.sizeInGB":"1100"}},
+      {"r2":{"type":"NRT"}}],
+    "s2":[{"r1":{
+      "type":"NRT",
+      "INDEX.sizeInGB":"1100"}},
+      {"r2":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "node1:8983":{
+      "cores":4,
+      "freedisk":300,
+      "totaldisk":4700,
+      "port":8983},
+    "node2:8984":{
+      "cores":0,
+      "freedisk":1000,
+      "totaldisk":1200,
+      "port":8984},
+    "node3:8985":{
+      "cores":0,
+      "freedisk":1651,
+      "totaldisk":1700,
+      "port":8985}},
+"clusterstate":{"c1":{
+  "router":{"name":"compositeId"},
+  "maxShardsPerNode":-1,
+  "shards":{
+    "s1":{"replicas":{
+      "r1":{
+        "type":"NRT",
+        "node_name":"node1:8983",
+        "state":"active",
+        "leader":"true"},
+      "r2":{
+        "type":"NRT",
+        "node_name":"node1:8983",
+        "state":"active"}}},
+    "s2":{"replicas":{
+      "r1":{
+        "type":"NRT",
+        "node_name":"node1:8983",
+        "state":"active",
+        "leader":"true"},
+      "r2":{
+        "type":"NRT",
+        "node_name":"node1:8983",
+        "state":"active"}}}}}}}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testComputePlanAfterNodeAdded.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testComputePlanAfterNodeAdded.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testComputePlanAfterNodeAdded.json
new file mode 100644
index 0000000..2171c38
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testComputePlanAfterNodeAdded.json
@@ -0,0 +1,16 @@
+{
+  "liveNodes":["127.0.0.1:51078_solr",
+    "127.0.0.1:51147_solr"],
+  "replicaInfo":{
+    "127.0.0.1:51147_solr":{},
+    "127.0.0.1:51078_solr":{"testNodeAdded":{"shard1":[{"core_node3":{"type":"NRT"}},
+      {"core_node4":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "127.0.0.1:51147_solr":{
+      "node":"127.0.0.1:51147_solr",
+      "cores":0,
+      "freedisk":880.5428657531738},
+    "127.0.0.1:51078_solr":{
+      "node":"127.0.0.1:51078_solr",
+      "cores":2,
+      "freedisk":880.5428695678711}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testCoresSuggestions.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testCoresSuggestions.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testCoresSuggestions.json
new file mode 100644
index 0000000..a6901db
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testCoresSuggestions.json
@@ -0,0 +1,17 @@
+{
+  "liveNodes":["10.0.0.6:7574_solr",
+    "10.0.0.6:8983_solr"],
+  "replicaInfo":{
+    "10.0.0.6:7574_solr":{},
+    "10.0.0.6:8983_solr":{"mycoll1":{
+      "shard1":[{"core_node1":{"type":"NRT"}}],
+      "shard2":[{"core_node2":{"type":"NRT"}}],
+      "shard3":[{"core_node3":{"type":"NRT"}}],
+      "shard4":[{"core_node4":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "10.0.0.6:7574_solr":{
+      "node":"10.0.0.6:7574_solr",
+      "cores":0},
+    "10.0.0.6:8983_solr":{
+      "node":"10.0.0.6:8983_solr",
+      "cores":4}}}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testDiskSpaceHint.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testDiskSpaceHint.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testDiskSpaceHint.json
new file mode 100644
index 0000000..edfcf7f
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testDiskSpaceHint.json
@@ -0,0 +1,16 @@
+{
+  "liveNodes":["127.0.0.1:51078_solr",
+    "127.0.0.1:51147_solr"],
+  "replicaInfo":{
+    "127.0.0.1:51147_solr":{},
+    "127.0.0.1:51078_solr":{"testNodeAdded":{"shard1":[{"core_node3":{"type":"NRT"}},
+      {"core_node4":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "127.0.0.1:51147_solr":{
+      "node":"127.0.0.1:51147_solr",
+      "cores":0,
+      "freedisk":100},
+    "127.0.0.1:51078_solr":{
+      "node":"127.0.0.1:51078_solr",
+      "cores":2,
+      "freedisk":200}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testEqualOnNonNode.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testEqualOnNonNode.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testEqualOnNonNode.json
new file mode 100644
index 0000000..c2dc13d
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testEqualOnNonNode.json
@@ -0,0 +1,83 @@
+[{"coll1":{
+  "router":{"name":"compositeId"},
+  "shards":{
+    "shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{
+        "r1":{//east
+          "core":"r1",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active"},
+        "r2":{//west
+          "core":"r2",
+          "base_url":"http://10.0.0.4:7574/solr",
+          "node_name":"node2",
+          "state":"active"}}},
+    "shard2":{
+      "range":"0-7fffffff",
+      "replicas":{
+        "r3":{//east
+          "core":"r3",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active"},
+        "r4":{//west
+          "core":"r4",
+          "base_url":"http://10.0.0.4:8987/solr",
+          "node_name":"node4",
+          "state":"active"},
+        "r6":{//east
+          "core":"r6",
+          "base_url":"http://10.0.0.4:8989/solr",
+          "node_name":"node3",
+          "state":"active"},
+        "r5":{//east
+          "core":"r5",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active"}}}}}},
+  {"nodeValues":{
+    "node1":{
+      "cores":3,
+      "freedisk":700,
+      "totaldisk":1000,
+      "sysprop.zone":"east"},
+    "node2":{
+      "cores":1,
+      "freedisk":900,
+      "totaldisk":1000,
+      "sysprop.zone":"west"},
+    "node3":{
+      "cores":1,
+      "freedisk":900,
+      "totaldisk":1000,
+      "sysprop.zone":"east"},
+    "node4":{
+      "cores":1,
+      "freedisk":900,
+      "totaldisk":1000,
+      "sysprop.zone":"west"},
+    "node5":{
+      "cores":0,
+      "freedisk":1000,
+      "totaldisk":1000,
+      "sysprop.zone":"west"}},
+  "replicaValues":[{
+    "INDEX.sizeInGB":100,
+    "core":"r1"},
+    {
+      "INDEX.sizeInGB":100,
+      "core":"r2"},
+    {
+      "INDEX.sizeInGB":100,
+      "core":"r3"},
+    {
+      "INDEX.sizeInGB":100,
+      "core":"r4"},
+    {
+      "INDEX.sizeInGB":100,
+      "core":"r5"},
+    {
+      "INDEX.sizeInGB":100,
+      "core":"r6"}]}]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskSuggestions.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskSuggestions.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskSuggestions.json
new file mode 100644
index 0000000..2b8897b
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreeDiskSuggestions.json
@@ -0,0 +1,27 @@
+{
+  "liveNodes":["node1",
+    "node2"],
+  "replicaInfo":{
+    "node1":{},
+    "node2":{"mycoll1":{
+      "shard1":[{"r1":{
+        "type":"NRT",
+        "INDEX.sizeInGB":900}}],
+      "shard2":[{"r2":{
+        "type":"NRT",
+        "INDEX.sizeInGB":300}}],
+      "shard3":[{"r3":{
+        "type":"NRT",
+        "INDEX.sizeInGB":200}}],
+      "shard4":[{"r4":{
+        "type":"NRT",
+        "INDEX.sizeInGB":100}}]}}},
+  "nodeValues":{
+    "node1":{
+      "node":"node1",
+      "cores":0,
+      "freedisk":2000},
+    "node2":{
+      "node":"node2",
+      "cores":4,
+      "freedisk":500}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreediskPercentage.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreediskPercentage.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreediskPercentage.json
new file mode 100644
index 0000000..174f862
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testFreediskPercentage.json
@@ -0,0 +1,25 @@
+{
+  "liveNodes":["node1:8983",
+    "node2:8984",
+    "node3:8985"],
+  "replicaInfo":{"node1:8983":{"c1":{
+    "s1":[{"r1":{"type":"NRT"}},
+      {"r2":{"type":"NRT"}}],
+    "s2":[{"r1":{"type":"NRT"}},
+      {"r2":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "node1:8983":{
+      "cores":4,
+      "freedisk":230,
+      "totaldisk":800,
+      "port":8983},
+    "node2:8984":{
+      "cores":0,
+      "freedisk":1000,
+      "totaldisk":1200,
+      "port":8984},
+    "node3:8985":{
+      "cores":0,
+      "freedisk":1500,
+      "totaldisk":1700,
+      "port":8985}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testMoveReplicaSuggester.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testMoveReplicaSuggester.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testMoveReplicaSuggester.json
new file mode 100644
index 0000000..e45c87f
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testMoveReplicaSuggester.json
@@ -0,0 +1,15 @@
+{
+  "liveNodes":["10.0.0.6:7574_solr",
+    "10.0.0.6:8983_solr"],
+  "replicaInfo":{
+    "10.0.0.6:7574_solr":{},
+    "10.0.0.6:8983_solr":{"mycoll1":{
+      "shard2":[{"core_node2":{"type":"NRT"}}],
+      "shard1":[{"core_node1":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "10.0.0.6:7574_solr":{
+      "node":"10.0.0.6:7574_solr",
+      "cores":0},
+    "10.0.0.6:8983_solr":{
+      "node":"10.0.0.6:8983_solr",
+      "cores":2}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testPolicy.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testPolicy.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testPolicy.json
new file mode 100644
index 0000000..373607e
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testPolicy.json
@@ -0,0 +1,41 @@
+{"gettingstarted":{
+  "router":{"name":"compositeId"},
+  "shards":{
+    "shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{
+        "r1":{
+          "core":"r1",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active",
+          "leader":"true"},
+        "r2":{
+          "core":"r2",
+          "base_url":"http://10.0.0.4:7574/solr",
+          "node_name":"node2",
+          "state":"active"}}},
+    "shard2":{
+      "range":"0-7fffffff",
+      "replicas":{
+        "r3":{
+          "core":"r3",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active",
+          "leader":"true"},
+        "r4":{
+          "core":"r4",
+          "base_url":"http://10.0.0.4:8987/solr",
+          "node_name":"node4",
+          "state":"active"},
+        "r6":{
+          "core":"r6",
+          "base_url":"http://10.0.0.4:8989/solr",
+          "node_name":"node3",
+          "state":"active"},
+        "r5":{
+          "core":"r5",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active"}}}}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testPortSuggestions.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testPortSuggestions.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testPortSuggestions.json
new file mode 100644
index 0000000..f518de1
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testPortSuggestions.json
@@ -0,0 +1,22 @@
+{
+  "liveNodes":["node1:8983",
+    "node2:8984",
+    "node3:8985"],
+  "replicaInfo":{"node1:8983":{"c1":{
+    "s1":[{"r1":{"type":"NRT"}},
+      {"r2":{"type":"NRT"}}],
+    "s2":[{"r1":{"type":"NRT"}},
+      {"r2":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "node1:8983":{
+      "cores":4,
+      "freedisk":334,
+      "port":8983},
+    "node2:8984":{
+      "cores":0,
+      "freedisk":1000,
+      "port":8984},
+    "node3:8985":{
+      "cores":0,
+      "freedisk":1500,
+      "port":8985}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaCountSuggestions.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaCountSuggestions.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaCountSuggestions.json
new file mode 100644
index 0000000..e45c87f
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaCountSuggestions.json
@@ -0,0 +1,15 @@
+{
+  "liveNodes":["10.0.0.6:7574_solr",
+    "10.0.0.6:8983_solr"],
+  "replicaInfo":{
+    "10.0.0.6:7574_solr":{},
+    "10.0.0.6:8983_solr":{"mycoll1":{
+      "shard2":[{"core_node2":{"type":"NRT"}}],
+      "shard1":[{"core_node1":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "10.0.0.6:7574_solr":{
+      "node":"10.0.0.6:7574_solr",
+      "cores":0},
+    "10.0.0.6:8983_solr":{
+      "node":"10.0.0.6:8983_solr",
+      "cores":2}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaPercentage.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaPercentage.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaPercentage.json
new file mode 100644
index 0000000..a6b3801
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaPercentage.json
@@ -0,0 +1,46 @@
+[{
+  "liveNodes":["10.0.0.6:7574_solr",
+    "10.0.0.6:8983_solr"],
+  "replicaInfo":{
+    "10.0.0.6:7574_solr":{},
+    "10.0.0.6:8983_solr":{"mycoll1":{"shard1":[{"core_node1":{"type":"NRT"}},
+      {"core_node2":{"type":"NRT"}},
+      {"core_node3":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "10.0.0.6:7574_solr":{
+      "node":"10.0.0.6:7574_solr",
+      "cores":0},
+    "10.0.0.6:8983_solr":{
+      "node":"10.0.0.6:8983_solr",
+      "cores":3}}},
+{
+  "liveNodes":["10.0.0.6:7574_solr",
+    "10.0.0.6:8983_solr"],
+  "replicaInfo":{
+    "10.0.0.6:7574_solr":{},
+    "10.0.0.6:8983_solr":{"mycoll1":{
+      "shard2":[{"core_node2":{"type":"NRT"}}],
+      "shard1":[{"core_node1":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "10.0.0.6:7574_solr":{
+      "node":"10.0.0.6:7574_solr",
+      "cores":0},
+    "10.0.0.6:8983_solr":{
+      "node":"10.0.0.6:8983_solr",
+      "cores":2}}},
+{
+  "liveNodes":["10.0.0.6:7574_solr",
+    "10.0.0.6:8983_solr"],
+  "replicaInfo":{
+    "10.0.0.6:7574_solr":{},
+    "10.0.0.6:8983_solr":{"mycoll1":{
+      "shard1":[{"core_node3":{"type":"PULL"}}],
+      "shard3":[{"core_node2":{"type":"TLOG"}}],
+      "shard2":[{"core_node1":{"type":"TLOG"}}]}}},
+  "nodeValues":{
+    "10.0.0.6:7574_solr":{
+      "node":"10.0.0.6:7574_solr",
+      "cores":0},
+    "10.0.0.6:8983_solr":{
+      "node":"10.0.0.6:8983_solr",
+      "cores":2}}}]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaZonesPercentage.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaZonesPercentage.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaZonesPercentage.json
new file mode 100644
index 0000000..eb4e3a8
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testReplicaZonesPercentage.json
@@ -0,0 +1,15 @@
+{
+  "liveNodes":["10.0.0.6:7574_solr",
+    "10.0.0.6:8983_solr"],
+  "replicaInfo":{
+    "10.0.0.6:7574_solr":{},
+    "10.0.0.6:8983_solr":{}},
+  "nodeValues":{
+    "10.0.0.6:7574_solr":{
+      "node":"10.0.0.6:7574_solr",
+      "cores":0,
+      "sysprop.az":"west"},
+    "10.0.0.6:8983_solr":{
+      "node":"10.0.0.6:8983_solr",
+      "cores":0,
+      "sysprop.az":"east"}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testSyspropSuggestions1.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testSyspropSuggestions1.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSyspropSuggestions1.json
new file mode 100644
index 0000000..085fe60
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testSyspropSuggestions1.json
@@ -0,0 +1,24 @@
+{
+  "liveNodes":["node1",
+    "node2",
+    "node3"],
+  "replicaInfo":{"node1":{"c1":{
+    "s1":[{
+      "r1":{"type":"NRT"},
+      "r2":{"type":"NRT"}}],
+    "s2":[{
+      "r1":{"type":"NRT"},
+      "r2":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "node1":{
+      "cores":2,
+      "freedisk":334,
+      "sysprop.fs":"slowdisk"},
+    "node2":{
+      "cores":2,
+      "freedisk":749,
+      "sysprop.fs":"slowdisk"},
+    "node3":{
+      "cores":0,
+      "freedisk":262,
+      "sysprop.fs":"ssd"}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testViolationOutput.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testViolationOutput.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testViolationOutput.json
new file mode 100644
index 0000000..f518de1
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testViolationOutput.json
@@ -0,0 +1,22 @@
+{
+  "liveNodes":["node1:8983",
+    "node2:8984",
+    "node3:8985"],
+  "replicaInfo":{"node1:8983":{"c1":{
+    "s1":[{"r1":{"type":"NRT"}},
+      {"r2":{"type":"NRT"}}],
+    "s2":[{"r1":{"type":"NRT"}},
+      {"r2":{"type":"NRT"}}]}}},
+  "nodeValues":{
+    "node1:8983":{
+      "cores":4,
+      "freedisk":334,
+      "port":8983},
+    "node2:8984":{
+      "cores":0,
+      "freedisk":1000,
+      "port":8984},
+    "node3:8985":{
+      "cores":0,
+      "freedisk":1500,
+      "port":8985}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollection.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollection.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollection.json
new file mode 100644
index 0000000..d171998
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollection.json
@@ -0,0 +1,21 @@
+{
+  "comments_coll":{
+    "router":{"name":"compositeId"},
+    "shards":{},
+    "withCollection":"articles_coll"},
+  "articles_coll":{
+    "router":{"name":"compositeId"},
+    "shards":{"shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{
+        "r1":{
+          "core":"r1",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active",
+          "leader":"true"},
+        "r2":{
+          "core":"r2",
+          "base_url":"http://10.0.0.4:7574/solr",
+          "node_name":"node2",
+          "state":"active"}}}}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveReplica.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveReplica.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveReplica.json
new file mode 100644
index 0000000..469eef1
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveReplica.json
@@ -0,0 +1,28 @@
+{
+  "comments_coll":{
+    "router":{"name":"compositeId"},
+    "shards":{"shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{"r1":{
+        "core":"r1",
+        "base_url":"http://10.0.0.4:8983/solr",
+        "node_name":"node1",
+        "state":"active",
+        "leader":"true"}}}},
+    "withCollection":"articles_coll"},
+  "articles_coll":{
+    "router":{"name":"compositeId"},
+    "shards":{"shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{
+        "r1":{
+          "core":"r1",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active",
+          "leader":"true"},
+        "r2":{
+          "core":"r2",
+          "base_url":"http://10.0.0.4:7574/solr",
+          "node_name":"node2",
+          "state":"active"}}}}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveVsAddSuggestions.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveVsAddSuggestions.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveVsAddSuggestions.json
new file mode 100644
index 0000000..0d99d4a
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionMoveVsAddSuggestions.json
@@ -0,0 +1,49 @@
+{
+  "articles_coll":{
+    "router":{"name":"compositeId"},
+    "shards":{"shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{
+        "r1":{
+          "core":"r1",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active",
+          "leader":"true"},
+        "r2":{
+          "core":"r2",
+          "base_url":"http://10.0.0.4:7574/solr",
+          "node_name":"node2",
+          "state":"active"},
+        "r3":{
+          "core":"r3",
+          "base_url":"http://10.0.0.4:7579/solr",
+          "node_name":"node6",
+          "state":"active"}}}}},
+  "comments_coll":{
+    "withCollection":"articles_coll",
+    "router":{"name":"compositeId"},
+    "shards":{"shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{
+        "r1":{
+          "core":"r1",
+          "base_url":"http://10.0.0.4:7576/solr",
+          "node_name":"node3",
+          "state":"active",
+          "leader":"true"},
+        "r2":{
+          "core":"r2",
+          "base_url":"http://10.0.0.4:7577/solr",
+          "node_name":"node4",
+          "state":"active"},
+        "r3":{
+          "core":"r3",
+          "base_url":"http://10.0.0.4:7578/solr",
+          "node_name":"node5",
+          "state":"active"},
+        "r4":{
+          "core":"r4",
+          "base_url":"http://10.0.0.4:7579/solr",
+          "node_name":"node6",
+          "state":"active"}}}}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionSuggestions.json
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionSuggestions.json b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionSuggestions.json
new file mode 100644
index 0000000..c4a29db
--- /dev/null
+++ b/solr/solrj/src/test-files/solrj/solr/autoscaling/testWithCollectionSuggestions.json
@@ -0,0 +1,21 @@
+{
+  "articles_coll":{
+    "router":{"name":"compositeId"},
+    "shards":{"shard1":{}}},
+  "comments_coll":{
+    "withCollection":"articles_coll",
+    "router":{"name":"compositeId"},
+    "shards":{"shard1":{
+      "range":"80000000-ffffffff",
+      "replicas":{
+        "r1":{
+          "core":"r1",
+          "base_url":"http://10.0.0.4:8983/solr",
+          "node_name":"node1",
+          "state":"active",
+          "leader":"true"},
+        "r2":{
+          "core":"r2",
+          "base_url":"http://10.0.0.4:7574/solr",
+          "node_name":"node2",
+          "state":"active"}}}}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
index fc0806b..64ba357 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java
@@ -21,7 +21,6 @@ package org.apache.solr.client.solrj.cloud.autoscaling;
 import java.io.IOException;
 import java.io.StringWriter;
 import java.lang.invoke.MethodHandles;
-import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -77,9 +76,11 @@ import org.slf4j.LoggerFactory;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.CLUSTER_PREFERENCES;
+import static org.apache.solr.client.solrj.cloud.autoscaling.TestPolicy2.loadFromResource;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORES;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK;
 import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.REPLICA;
+import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_STATE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
 
@@ -107,47 +108,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
     return cloudManagerWithData(jsonObj);
   }
 
-  public static String clusterState = "{'gettingstarted':{" +
-      "    'router':{'name':'compositeId'}," +
-      "    'shards':{" +
-      "      'shard1':{" +
-      "        'range':'80000000-ffffffff'," +
-      "        'replicas':{" +
-      "          'r1':{" +
-      "            'core':r1," +
-      "            'base_url':'http://10.0.0.4:8983/solr'," +
-      "            'node_name':'node1'," +
-      "            'state':'active'," +
-      "            'leader':'true'}," +
-      "          'r2':{" +
-      "            'core':r2," +
-      "            'base_url':'http://10.0.0.4:7574/solr'," +
-      "            'node_name':'node2'," +
-      "            'state':'active'}}}," +
-      "      'shard2':{" +
-      "        'range':'0-7fffffff'," +
-      "        'replicas':{" +
-      "          'r3':{" +
-      "            'core':r3," +
-      "            'base_url':'http://10.0.0.4:8983/solr'," +
-      "            'node_name':'node1'," +
-      "            'state':'active'," +
-      "            'leader':'true'}," +
-      "          'r4':{" +
-      "            'core':r4," +
-      "            'base_url':'http://10.0.0.4:8987/solr'," +
-      "            'node_name':'node4'," +
-      "            'state':'active'}," +
-      "          'r6':{" +
-      "            'core':r6," +
-      "            'base_url':'http://10.0.0.4:8989/solr'," +
-      "            'node_name':'node3'," +
-      "            'state':'active'}," +
-      "          'r5':{" +
-      "            'core':r5," +
-      "            'base_url':'http://10.0.0.4:8983/solr'," +
-      "            'node_name':'node1'," +
-      "            'state':'active'}}}}}}";
+  public static String clusterState = Utils.toJSONString(loadFromResource("testPolicy.json"));
 
   public static Map<String, Map<String, List<ReplicaInfo>>> getReplicaDetails(String node, Map clusterState) {
     ValidatingJsonMap m = ValidatingJsonMap
@@ -174,42 +135,9 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
 
   public void testWithCollection() {
-    String clusterStateStr = "{" +
-        "  'comments_coll':{" +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards':{}," +
-        "    'withCollection' :'articles_coll'" +
-        "  }," +
-        "  'articles_coll': {" +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards': {" +
-        "      'shard1': {" +
-        "        'range': '80000000-ffffffff'," +
-        "        'replicas': {" +
-        "          'r1': {" +
-        "            'core': 'r1'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'," +
-        "            'leader': 'true'" +
-        "          }," +
-        "          'r2': {" +
-        "            'core': 'r2'," +
-        "            'base_url': 'http://10.0.0.4:7574/solr'," +
-        "            'node_name': 'node2'," +
-        "            'state': 'active'" +
-        "          }" +
-        "        }" +
-        "      }" +
-        "    }" +
-        "  }" +
-        "}";
-    ClusterState clusterState = ClusterState.load(1, clusterStateStr.getBytes(UTF_8),
-        ImmutableSet.of("node1", "node2", "node3", "node4", "node5"));
+    ClusterState clusterState = ClusterState.load(1,
+        (Map) loadFromResource("testWithCollection.json"),
+        ImmutableSet.of("node1", "node2", "node3", "node4", "node5"), CLUSTER_STATE);
     DelegatingClusterStateProvider clusterStateProvider = new DelegatingClusterStateProvider(null) {
       @Override
       public ClusterState getClusterState() throws IOException {
@@ -298,42 +226,10 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testWithCollectionSuggestions() {
-    String clusterStateStr = "{" +
-        "  'articles_coll':{" +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards':{'shard1':{}}," +
-        "  }," +
-        "  'comments_coll': {" +
-        "    'withCollection' :'articles_coll'," +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards': {" +
-        "      'shard1': {" +
-        "        'range': '80000000-ffffffff'," +
-        "        'replicas': {" +
-        "          'r1': {" +
-        "            'core': 'r1'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'," +
-        "            'leader': 'true'" +
-        "          }," +
-        "          'r2': {" +
-        "            'core': 'r2'," +
-        "            'base_url': 'http://10.0.0.4:7574/solr'," +
-        "            'node_name': 'node2'," +
-        "            'state': 'active'" +
-        "          }" +
-        "        }" +
-        "      }" +
-        "    }" +
-        "  }" +
-        "}";
-    ClusterState clusterState = ClusterState.load(1, clusterStateStr.getBytes(UTF_8),
-        ImmutableSet.of("node1", "node2", "node3", "node4", "node5"));
+    ClusterState clusterState =
+        ClusterState.load(1,
+            (Map) loadFromResource("testWithCollectionSuggestions.json"),
+            ImmutableSet.of("node1", "node2", "node3", "node4", "node5"), CLUSTER_STATE);
     DelegatingClusterStateProvider clusterStateProvider = new DelegatingClusterStateProvider(null) {
       @Override
       public ClusterState getClusterState() throws IOException {
@@ -421,83 +317,15 @@ public class TestPolicy extends SolrTestCaseJ4 {
     assertTrue(nodes.contains("node2"));
   }
 
-  public void testWithCollectionMoveVsAddSuggestions() {
-    String clusterStateStr = "{" +
-        "  'articles_coll':{" +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards': {" +
-        "      'shard1': {" +
-        "        'range': '80000000-ffffffff'," +
-        "        'replicas': {" +
-        "          'r1': {" +
-        "            'core': 'r1'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'," +
-        "            'leader': 'true'" +
-        "          }," +
-        "          'r2': {" +
-        "            'core': 'r2'," +
-        "            'base_url': 'http://10.0.0.4:7574/solr'," +
-        "            'node_name': 'node2'," +
-        "            'state': 'active'" +
-        "          }," +
-        "          'r3': {" +
-        "            'core': 'r3'," +
-        "            'base_url': 'http://10.0.0.4:7579/solr'," +
-        "            'node_name': 'node6'," +
-        "            'state': 'active'" +
-        "          }" +
-        "        }" +
-        "      }" +
-        "    }" +
-        "  }," +
-        "  'comments_coll': {" +
-        "    'withCollection' :'articles_coll'," +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards': {" +
-        "      'shard1': {" +
-        "        'range': '80000000-ffffffff'," +
-        "        'replicas': {" +
-        "          'r1': {" +
-        "            'core': 'r1'," +
-        "            'base_url': 'http://10.0.0.4:7576/solr'," +
-        "            'node_name': 'node3'," +
-        "            'state': 'active'," +
-        "            'leader': 'true'" +
-        "          }," +
-        "          'r2': {" +
-        "            'core': 'r2'," +
-        "            'base_url': 'http://10.0.0.4:7577/solr'," +
-        "            'node_name': 'node4'," +
-        "            'state': 'active'" +
-        "          }," +
-        "          'r3': {" +
-        "            'core': 'r3'," +
-        "            'base_url': 'http://10.0.0.4:7578/solr'," +
-        "            'node_name': 'node5'," +
-        "            'state': 'active'" +
-        "          }," +
-        "          'r4': {" +
-        "            'core': 'r4'," +
-        "            'base_url': 'http://10.0.0.4:7579/solr'," +
-        "            'node_name': 'node6'," +
-        "            'state': 'active'" +
-        "          }" +
-        "        }" +
-        "      }" +
-        "    }" +
-        "  }" +
-        "}";
-    ClusterState clusterState = ClusterState.load(1, clusterStateStr.getBytes(UTF_8),
-        ImmutableSet.of("node1", "node2", "node3", "node4", "node5", "node6"));
+  public void testWithCollectionMoveVsAddSuggestions() throws IOException {
+    ClusterState clusterState = ClusterState.load(1,
+        (Map) loadFromResource("testWithCollectionMoveVsAddSuggestions.json"),
+        ImmutableSet.of("node1", "node2", "node3", "node4", "node5", "node6"),
+        CLUSTER_STATE
+    );
     DelegatingClusterStateProvider clusterStateProvider = new DelegatingClusterStateProvider(null) {
       @Override
-      public ClusterState getClusterState() throws IOException {
+      public ClusterState getClusterState() {
         return clusterState;
       }
 
@@ -598,55 +426,9 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testWithCollectionMoveReplica() {
-    String clusterStateStr = "{" +
-        "  'comments_coll':{" +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards':{" +
-        "       'shard1' : {" +
-        "        'range': '80000000-ffffffff'," +
-        "        'replicas': {" +
-        "          'r1': {" +
-        "            'core': 'r1'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'," +
-        "            'leader': 'true'" +
-        "          }" +
-        "         }" +
-        "       }" +
-        "     }," +
-        "    'withCollection' :'articles_coll'" +
-        "  }," +
-        "  'articles_coll': {" +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards': {" +
-        "      'shard1': {" +
-        "        'range': '80000000-ffffffff'," +
-        "        'replicas': {" +
-        "          'r1': {" +
-        "            'core': 'r1'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'," +
-        "            'leader': 'true'" +
-        "          }," +
-        "          'r2': {" +
-        "            'core': 'r2'," +
-        "            'base_url': 'http://10.0.0.4:7574/solr'," +
-        "            'node_name': 'node2'," +
-        "            'state': 'active'" +
-        "          }" +
-        "        }" +
-        "      }" +
-        "    }" +
-        "  }" +
-        "}";
-    ClusterState clusterState = ClusterState.load(1, clusterStateStr.getBytes(UTF_8),
-        ImmutableSet.of("node2", "node3", "node4", "node5"));
+    ClusterState clusterState = ClusterState.load(1,
+        (Map) loadFromResource("testWithCollectionMoveReplica.json"),
+        ImmutableSet.of("node2", "node3", "node4", "node5"), CLUSTER_STATE);
     DelegatingClusterStateProvider clusterStateProvider = new DelegatingClusterStateProvider(null) {
       @Override
       public ClusterState getClusterState() throws IOException {
@@ -1342,7 +1124,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
         return new DelegatingClusterStateProvider(null) {
           @Override
           public ClusterState getClusterState() throws IOException {
-            return ClusterState.load(0,new HashMap<>(), getLiveNodes(),"/clusterstate.json");
+            return ClusterState.load(0, new HashMap<>(), getLiveNodes(), CLUSTER_STATE);
           }
 
           @Override
@@ -1444,7 +1226,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "}");
     Policy policy = new Policy(new HashMap<>());
     Suggester suggester = policy.createSession(getSolrCloudManager(nodeValues,
-        (Map<String, Object>) TestPolicy2.loadFromResource("testMoveReplicasInMultipleCollections.json")))
+        (Map<String, Object>) loadFromResource("testMoveReplicasInMultipleCollections.json")))
         .getSuggester(MOVEREPLICA)
         .hint(Hint.COLL, "collection1")
         .hint(Hint.COLL, "collection2")
@@ -2189,29 +1971,13 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testMoveReplicaSuggester() {
-    String dataproviderdata = "{" +
-        "  'liveNodes':[" +
-        "    '10.0.0.6:7574_solr'," +
-        "    '10.0.0.6:8983_solr']," +
-        "  'replicaInfo':{" +
-        "    '10.0.0.6:7574_solr':{}," +
-        "    '10.0.0.6:8983_solr':{'mycoll1':{" +
-        "        'shard2':[{'core_node2':{'type':'NRT'}}]," +
-        "        'shard1':[{'core_node1':{'type':'NRT'}}]}}}," +
-        "  'nodeValues':{" +
-        "    '10.0.0.6:7574_solr':{" +
-        "      'node':'10.0.0.6:7574_solr'," +
-        "      'cores':0}," +
-        "    '10.0.0.6:8983_solr':{" +
-        "      'node':'10.0.0.6:8983_solr'," +
-        "      'cores':2}}}";
     String autoScalingjson = "  '{cluster-policy':[" +
         "    {      'cores':'<10',      'node':'#ANY'}," +
         "    {      'replica':'<2',      'shard':'#EACH',      'node':'#ANY'}," +
         "    {      'nodeRole':'overseer','replica':0}]," +
         "  'cluster-preferences':[{'minimize':'cores'}]}";
     Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    Policy.Session session = policy.createSession(cloudManagerWithData(dataproviderdata));
+    Policy.Session session = policy.createSession(cloudManagerWithData((Map) loadFromResource("testMoveReplicaSuggester.json")));
     Suggester suggester = session.getSuggester(MOVEREPLICA).hint(Hint.TARGET_NODE, "10.0.0.6:7574_solr");
     SolrRequest op = suggester.getSuggestion();
     assertNotNull(op);
@@ -2221,26 +1987,6 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testComputePlanAfterNodeAdded() {
-
-    String dataproviderdata = "{" +
-        "     liveNodes:[" +
-        "       '127.0.0.1:51078_solr'," +
-        "       '127.0.0.1:51147_solr']," +
-        "     replicaInfo:{" +
-        "       '127.0.0.1:51147_solr':{}," +
-        "       '127.0.0.1:51078_solr':{testNodeAdded:{shard1:[" +
-        "             { core_node3 : { type : NRT}}," +
-        "             { core_node4 : { type : NRT}}]}}}," +
-        "     nodeValues:{" +
-        "       '127.0.0.1:51147_solr':{" +
-        "         node:'127.0.0.1:51147_solr'," +
-        "         cores:0," +
-        "         freedisk : 880.5428657531738}," +
-        "       '127.0.0.1:51078_solr':{" +
-        "         node:'127.0.0.1:51078_solr'," +
-        "         cores:2," +
-        "         freedisk:880.5428695678711}}}";
-
     String autoScalingjson = "cluster-preferences:[" +
         "       {minimize : cores}," +
         "       {'maximize':freedisk , precision:100}],    " +
@@ -2248,7 +1994,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "       {replica:'<2', shard:'#EACH',node:'#ANY'}," +
         "       { nodeRole:overseer,replica:0}]}";
     Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    Policy.Session session = policy.createSession(cloudManagerWithData(dataproviderdata));
+    Policy.Session session = policy.createSession(cloudManagerWithData((Map) loadFromResource("testComputePlanAfterNodeAdded.json")));
     Suggester suggester = session.getSuggester(CollectionParams.CollectionAction.MOVEREPLICA)
         .hint(Hint.TARGET_NODE, "127.0.0.1:51147_solr");
     SolrRequest op = suggester.getSuggestion();
@@ -2257,29 +2003,13 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testReplicaCountSuggestions() {
-    String dataproviderdata = "{" +
-        "  'liveNodes':[" +
-        "    '10.0.0.6:7574_solr'," +
-        "    '10.0.0.6:8983_solr']," +
-        "  'replicaInfo':{" +
-        "    '10.0.0.6:7574_solr':{}," +
-        "    '10.0.0.6:8983_solr':{'mycoll1':{" +
-        "        'shard2':[{'core_node2':{'type':'NRT'}}]," +
-        "        'shard1':[{'core_node1':{'type':'NRT'}}]}}}," +
-        "  'nodeValues':{" +
-        "    '10.0.0.6:7574_solr':{" +
-        "      'node':'10.0.0.6:7574_solr'," +
-        "      'cores':0}," +
-        "    '10.0.0.6:8983_solr':{" +
-        "      'node':'10.0.0.6:8983_solr'," +
-        "      'cores':2}}}";
     String autoScalingjson = "  { cluster-policy:[" +
         "    { cores :'<10', node :'#ANY'}," +
         "    { replica :'<2',  node:'#ANY'}," +
         "    { nodeRole : overseer, replica :0}]," +
         "  cluster-preferences :[{ minimize : cores }]}";
     List<Suggester.SuggestionInfo> l = PolicyHelper.getSuggestions(new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson)),
-        cloudManagerWithData(dataproviderdata));
+        cloudManagerWithData((Map) loadFromResource("testReplicaCountSuggestions.json")));
     assertFalse(l.isEmpty());
 
     assertEquals(1.0d, l.get(0)._get( "violation/violation/delta",null));
@@ -2292,28 +2022,14 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
 
   public void testReplicaPercentage() {
-    String dataproviderdata = "{" +
-        "  'liveNodes':[" +
-        "    '10.0.0.6:7574_solr'," +
-        "    '10.0.0.6:8983_solr']," +
-        "  'replicaInfo':{" +
-        "    '10.0.0.6:7574_solr':{}," +
-        "    '10.0.0.6:8983_solr':{'mycoll1':{" +
-        "        'shard1':[{'core_node1':{'type':'NRT'}},{'core_node2':{'type':'NRT'}},{'core_node3':{'type':'NRT'}}]}}}," +
-        "  'nodeValues':{" +
-        "    '10.0.0.6:7574_solr':{" +
-        "      'node':'10.0.0.6:7574_solr'," +
-        "      'cores':0}," +
-        "    '10.0.0.6:8983_solr':{" +
-        "      'node':'10.0.0.6:8983_solr'," +
-        "      'cores':3}}}";
+    List<Map> l = (List<Map>) loadFromResource("testReplicaPercentage.json");
     String autoScalingjson = "  { cluster-policy:[" +
         "    { replica :'51%', shard:'#EACH', node:'#ANY'}]," +
         "  cluster-preferences :[{ minimize : cores }]}";
 
 
     AutoScalingConfig autoScalingConfig = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    Policy.Session session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(dataproviderdata));
+    Policy.Session session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(l.get(0)));
     List<Violation> violations = session.getViolations();
     assertEquals(2, violations.size());
     for (Violation violation : violations) {
@@ -2327,82 +2043,27 @@ public class TestPolicy extends SolrTestCaseJ4 {
     }
 
 
-    dataproviderdata = "{" +
-        "  'liveNodes':[" +
-        "    '10.0.0.6:7574_solr'," +
-        "    '10.0.0.6:8983_solr']," +
-        "  'replicaInfo':{" +
-        "    '10.0.0.6:7574_solr':{}," +
-        "    '10.0.0.6:8983_solr':{'mycoll1':{" +
-        "        'shard2':[{'core_node2':{'type':'NRT'}}]," +
-        "        'shard1':[{'core_node1':{'type':'NRT'}}]}}}," +
-        "  'nodeValues':{" +
-        "    '10.0.0.6:7574_solr':{" +
-        "      'node':'10.0.0.6:7574_solr'," +
-        "      'cores':0}," +
-        "    '10.0.0.6:8983_solr':{" +
-        "      'node':'10.0.0.6:8983_solr'," +
-        "      'cores':2}}}";
-
-    session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(dataproviderdata));
+    session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(Utils.getDeepCopy(l.get(1), 6)));
     violations = session.getViolations();
     assertEquals(0, violations.size());
     autoScalingjson = "  { cluster-policy:[" +
         "    { replica :'51%', shard: '#EACH' , node:'#ANY'}]," +
         "  cluster-preferences :[{ minimize : cores }]}";
     autoScalingConfig = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(dataproviderdata));
+    session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(l.get(1)));
     violations = session.getViolations();
     assertEquals(0, violations.size());
-
-    dataproviderdata = "{" +
-        "  'liveNodes':[" +
-        "    '10.0.0.6:7574_solr'," +
-        "    '10.0.0.6:8983_solr']," +
-        "  'replicaInfo':{" +
-        "    '10.0.0.6:7574_solr':{}," +
-        "    '10.0.0.6:8983_solr':{'mycoll1':{" +
-        "        'shard1':[{'core_node4':{'type':'PULL'}}]," +
-        "        'shard1':[{'core_node3':{'type':'PULL'}}]," +
-        "        'shard3':[{'core_node2':{'type':'TLOG'}}]," +
-        "        'shard2':[{'core_node1':{'type':'TLOG'}}]}}}," +
-        "  'nodeValues':{" +
-        "    '10.0.0.6:7574_solr':{" +
-        "      'node':'10.0.0.6:7574_solr'," +
-        "      'cores':0}," +
-        "    '10.0.0.6:8983_solr':{" +
-        "      'node':'10.0.0.6:8983_solr'," +
-        "      'cores':2}}}";
     autoScalingjson = "  { cluster-policy:[" +
         "    { replica :'50%',node:'#ANY' , type: TLOG } ,{ replica :'50%',node:'#ANY' , type: PULL } ]," +
         "  cluster-preferences :[{ minimize : cores }]}";
     autoScalingConfig = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(dataproviderdata));
+    session = autoScalingConfig.getPolicy().createSession(cloudManagerWithData(l.get(2)));
     violations = session.getViolations();
     assertEquals(2, violations.size());
 
   }
 
   public void testReplicaZonesPercentage() {
-    String dataproviderdata = "{" +
-        "  'liveNodes':[" +
-        "    '10.0.0.6:7574_solr'," +
-        "    '10.0.0.6:8983_solr']," +
-        "  'replicaInfo':{" +
-        "    '10.0.0.6:7574_solr':{}," +
-        "    '10.0.0.6:8983_solr':{}}," +
-        "  'nodeValues':{" +
-        "    '10.0.0.6:7574_solr':{" +
-        "      'node':'10.0.0.6:7574_solr'," +
-        "      'cores':0," +
-        "      'sysprop.az': 'west'" +
-        "    }," +
-        "    '10.0.0.6:8983_solr':{" +
-        "      'node':'10.0.0.6:8983_solr'," +
-        "      'cores':0," +
-        "      'sysprop.az': 'east'    " +
-        "    }}}";
-
     String autoScalingjson = "  { cluster-policy:[" +
         "    { replica :'33%', shard: '#EACH', sysprop.az : east}," +
         "    { replica :'67%', shard: '#EACH', sysprop.az : west}" +
@@ -2413,7 +2074,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
     AutoScalingConfig autoScalingConfig = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
 
     Policy.Transaction txn = new Policy.Transaction(autoScalingConfig.getPolicy());
-    txn.open(cloudManagerWithData(dataproviderdata));
+    txn.open(cloudManagerWithData((Map<String, Object>) loadFromResource("testReplicaZonesPercentage.json")));
 
     List<String> nodes = new ArrayList<>();
 
@@ -2448,8 +2109,9 @@ public class TestPolicy extends SolrTestCaseJ4 {
     assertEquals(4, count.get());
 
   }
-  public void testFreeDiskDeviation() throws IOException {
-    Map map = (Map) TestPolicy2.loadFromResource("testFreeDiskDeviation.json");
+
+  public void testFreeDiskDeviation() {
+    Map map = (Map) loadFromResource("testFreeDiskDeviation.json");
     AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) map.get("config"));
     SolrCloudManager scm = cloudManagerWithData(map);
     Suggester suggester = cfg.getPolicy()
@@ -2483,26 +2145,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
 
   public void testFreeDiskSuggestions() {
-    String dataproviderdata = "{" +
-        "  liveNodes:[node1,node2]," +
-        "  replicaInfo : {" +
-        "    node1:{}," +
-        "    node2:{mycoll1:{" +
-        "        shard1:[{r1:{type:NRT, INDEX.sizeInGB:900}}]," +
-        "        shard2:[{r2:{type:NRT, INDEX.sizeInGB:300}}]," +
-        "        shard3:[{r3:{type:NRT, INDEX.sizeInGB:200}}]," +
-        "        shard4:[{r4:{type:NRT, INDEX.sizeInGB:100}}]}}}" +
-        "    nodeValues : {" +
-        "    node1: { node : node1 , cores:0 , freedisk : 2000}," +
-        "    node2: { node : node2 , cores:4 , freedisk : 500}}}";
-
-
     String autoScalingjson = "  { cluster-policy:[" +
         "    { replica :'0', freedisk:'<1000'}," +
         "    { nodeRole : overseer, replica :0}]," +
         "  cluster-preferences :[{ minimize : cores, precision : 2 }]}";
     AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData(dataproviderdata)).getViolations();
+    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testFreeDiskSuggestions.json"))).getViolations();
     assertEquals(1, violations.size());
     assertEquals(4, violations.get(0).getViolatingReplicas().size());
     assertEquals(4, violations.get(0).replicaCountDelta, 0.1);
@@ -2511,7 +2159,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
     }
 
-    List<Suggester.SuggestionInfo> l = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
+    List<Suggester.SuggestionInfo> l = PolicyHelper.getSuggestions(cfg, cloudManagerWithData((Map) loadFromResource("testFreeDiskSuggestions.json")));
     assertEquals(3, l.size());
     assertEquals("r4", l.get(0)._get("operation/command/move-replica/replica", null));
     assertEquals("node1", l.get(0)._get("operation/command/move-replica/targetNode", null));
@@ -2528,13 +2176,13 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "    { nodeRole : overseer, replica :0}]," +
         "  cluster-preferences :[{ minimize : cores, precision : 2 }]}";
     cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    violations = cfg.getPolicy().createSession(cloudManagerWithData(dataproviderdata)).getViolations();
+    violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testFreeDiskSuggestions.json"))).getViolations();
     assertEquals(1, violations.size());
     assertEquals(-4, violations.get(0).replicaCountDelta, 0.1);
     assertEquals(1, violations.size());
     assertEquals(0, violations.get(0).getViolatingReplicas().size());
 
-    l = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
+    l = PolicyHelper.getSuggestions(cfg, cloudManagerWithData((Map) loadFromResource("testFreeDiskSuggestions.json")));
     assertEquals(4, l.size());
     assertEquals("r4", l.get(0)._get("operation/command/move-replica/replica", null));
     assertEquals("node1", l.get(0)._get("operation/command/move-replica/targetNode", null));
@@ -2552,34 +2200,16 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
 
   public void testCoresSuggestions() {
-    String dataproviderdata = "{" +
-        "  'liveNodes':[" +
-        "    '10.0.0.6:7574_solr'," +
-        "    '10.0.0.6:8983_solr']," +
-        "  'replicaInfo':{" +
-        "    '10.0.0.6:7574_solr':{}," +
-        "    '10.0.0.6:8983_solr':{'mycoll1':{" +
-        "        'shard1':[{'core_node1':{'type':'NRT'}}]," +
-        "        'shard2':[{'core_node2':{'type':'NRT'}}]," +
-        "        'shard3':[{'core_node3':{'type':'NRT'}}]," +
-        "        'shard4':[{'core_node4':{'type':'NRT'}}]}}}," +
-        "  'nodeValues':{" +
-        "    '10.0.0.6:7574_solr':{" +
-        "      'node':'10.0.0.6:7574_solr'," +
-        "      'cores':0}," +
-        "    '10.0.0.6:8983_solr':{" +
-        "      'node':'10.0.0.6:8983_solr'," +
-        "      'cores':4}}}";
     String autoScalingjson = "  { cluster-policy:[" +
         "    { cores :'<3', node :'#ANY'}]," +
         "  cluster-preferences :[{ minimize : cores }]}";
     AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData(dataproviderdata)).getViolations();
+    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testCoresSuggestions.json"))).getViolations();
     assertFalse(violations.isEmpty());
     assertEquals(2L, violations.get(0).replicaCountDelta.longValue());
 
     List<Suggester.SuggestionInfo> l = PolicyHelper.getSuggestions(cfg,
-        cloudManagerWithData(dataproviderdata));
+        cloudManagerWithData((Map) loadFromResource("testCoresSuggestions.json")));
     assertEquals(2, l.size());
     for (Suggester.SuggestionInfo suggestionInfo : l) {
       assertEquals("10.0.0.6:7574_solr", suggestionInfo._get("operation/command/move-replica/targetNode", null));
@@ -2589,7 +2219,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
   }
 
-  public void testSyspropSuggestions() {
+  public void testSyspropSuggestions1() {
     String autoScalingjson = "{" +
         "  'cluster-preferences': [" +
         "    { 'maximize': 'freedisk', 'precision': 50}," +
@@ -2601,28 +2231,10 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "}";
 
 
-    String dataproviderdata = "{" +
-        "  'liveNodes': [" +
-        "    'node1'," +
-        "    'node2'," +
-        "    'node3'" +
-        "  ]," +
-        "  'replicaInfo': {" +
-        "    'node1': {" +
-        "      'c1': {'s1': [{'r1': {'type': 'NRT'}, 'r2': {'type': 'NRT'}}]," +
-        "             's2': [{'r1': {'type': 'NRT'}, 'r2': {'type': 'NRT'}}]}," +
-        "    }" +
-        "  }," +
-        "    'nodeValues': {" +
-        "      'node1': {'cores': 2, 'freedisk': 334, 'sysprop.fs': 'slowdisk'}," +
-        "      'node2': {'cores': 2, 'freedisk': 749, 'sysprop.fs': 'slowdisk'}," +
-        "      'node3': {'cores': 0, 'freedisk': 262, 'sysprop.fs': 'ssd'}" +
-        "    }" +
-        "}";
     AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData(dataproviderdata)).getViolations();
+    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testSyspropSuggestions1.json"))).getViolations();
     assertEquals("expected 2 violations", 2, violations.size());
-    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
+    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(cfg, cloudManagerWithData((Map) loadFromResource("testSyspropSuggestions1.json")));
     assertEquals(2, suggestions.size());
     for (Suggester.SuggestionInfo suggestion : suggestions) {
       suggestion._get("operation/move-replica/targetNode", null);
@@ -2640,49 +2252,10 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "  ]" +
         "}";
 
-
-    String dataproviderdata = "{" +
-        "  'liveNodes': [" +
-        "    'node1:8983'," +
-        "    'node2:8984'," +
-        "    'node3:8985'" +
-        "  ]," +
-        "  'replicaInfo': {" +
-        "    'node1:8983': {" +
-        "      'c1': {" +
-        "        's1': [" +
-        "          {'r1': {'type': 'NRT'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]," +
-        "        's2': [" +
-        "          {'r1': {'type': 'NRT'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]" +
-        "      }" +
-        "    }" +
-        "  }," +
-        "  'nodeValues': {" +
-        "    'node1:8983': {" +
-        "      'cores': 4," +
-        "      'freedisk': 334," +
-        "      'port': 8983" +
-        "    }," +
-        "    'node2:8984': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1000," +
-        "      'port': 8984" +
-        "    }," +
-        "    'node3:8985': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1500," +
-        "      'port': 8985" +
-        "    }" +
-        "  }" +
-        "}";
     AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData(dataproviderdata)).getViolations();
+    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testPortSuggestions.json"))).getViolations();
     assertEquals(2, violations.size());
-    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(cfg, cloudManagerWithData(dataproviderdata));
+    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(cfg, cloudManagerWithData((Map) loadFromResource("testPortSuggestions.json")));
     assertEquals(4, suggestions.size());
     for (Suggester.SuggestionInfo suggestionInfo : suggestions) {
       assertEquals(suggestionInfo.operation.getPath(), "/c/c1");
@@ -2690,33 +2263,13 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testDiskSpaceHint() {
-
-    String dataproviderdata = "{" +
-        "     liveNodes:[" +
-        "       '127.0.0.1:51078_solr'," +
-        "       '127.0.0.1:51147_solr']," +
-        "     replicaInfo:{" +
-        "       '127.0.0.1:51147_solr':{}," +
-        "       '127.0.0.1:51078_solr':{testNodeAdded:{shard1:[" +
-        "             { core_node3 : { type : NRT}}," +
-        "             { core_node4 : { type : NRT}}]}}}," +
-        "     nodeValues:{" +
-        "       '127.0.0.1:51147_solr':{" +
-        "         node:'127.0.0.1:51147_solr'," +
-        "         cores:0," +
-        "         freedisk : 100}," +
-        "       '127.0.0.1:51078_solr':{" +
-        "         node:'127.0.0.1:51078_solr'," +
-        "         cores:2," +
-        "         freedisk:200}}}";
-
     String autoScalingjson = "cluster-preferences:[" +
         "       {minimize : cores}]" +
         " cluster-policy:[{cores:'<10',node:'#ANY'}," +
         "       {replica:'<2', shard:'#EACH',node:'#ANY'}," +
         "       { nodeRole:overseer,replica:0}]}";
     Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    Policy.Session session = policy.createSession(cloudManagerWithData(dataproviderdata));
+    Policy.Session session = policy.createSession(cloudManagerWithData((Map) loadFromResource("testDiskSpaceHint.json")));
     Suggester suggester = session.getSuggester(CollectionAction.ADDREPLICA)
         .hint(Hint.COLL_SHARD, new Pair<>("coll1", "shard1"))
         .hint(Hint.MINFREEDISK, 150);
@@ -2840,7 +2393,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testScheduledTriggerFailure() throws Exception {
-    Map jsonObj = (Map) TestPolicy2.loadFromResource("testScheduledTriggerFailure.json");
+    Map jsonObj = (Map) loadFromResource("testScheduledTriggerFailure.json");
     SolrCloudManager cloudManager = createCloudManager(jsonObj);
     Suggester suggester = createSuggester(cloudManager, jsonObj, null);
     int count = 0;
@@ -2856,7 +2409,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testUtilizeNodeFailure() throws Exception {
-    Map jsonObj = (Map) TestPolicy2.loadFromResource("testUtilizeNodeFailure.json"); //(Map) Utils.fromJSONString(state);
+    Map jsonObj = (Map) loadFromResource("testUtilizeNodeFailure.json"); //(Map) Utils.fromJSONString(state);
     SolrCloudManager cloudManager = createCloudManager(jsonObj);
     Suggester suggester = createSuggester(cloudManager, jsonObj, null);
     int count = 0;
@@ -2872,7 +2425,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testUtilizeNodeFailure2() throws Exception {
-    Map jsonObj = (Map) TestPolicy2.loadFromResource("testUtilizeNodeFailure2.json");
+    Map jsonObj = (Map) loadFromResource("testUtilizeNodeFailure2.json");
     SolrCloudManager cloudManager = createCloudManager(jsonObj);
     Suggester suggester = createSuggester(cloudManager, jsonObj, null);
     int count = 0;
@@ -2888,12 +2441,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   //SOLR-12358
-  public void testSortError() throws IOException {
+  public void testSortError() {
     Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString("{cluster-preferences: [{minimize : cores, precision:1}, " +
         "{maximize : freedisk, precision: 50}, " +
         "{minimize: sysLoadAvg}]}"));
 
-    List l = (List) TestPolicy2.loadFromResource("testSortError.json");
+    List l = (List) loadFromResource("testSortError.json");
     List<Variable.Type> params = new ArrayList<>();
     params.add(CORES);
     params.add(Variable.Type.FREEDISK);
@@ -2947,47 +2500,8 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "  ]" +
         "}";
 
-
-    String dataproviderdata = "{" +
-        "  'liveNodes': [" +
-        "    'node1:8983'," +
-        "    'node2:8984'," +
-        "    'node3:8985'" +
-        "  ]," +
-        "  'replicaInfo': {" +
-        "    'node1:8983': {" +
-        "      'c1': {" +
-        "        's1': [" +
-        "          {'r1': {'type': 'NRT'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]," +
-        "        's2': [" +
-        "          {'r1': {'type': 'NRT'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]" +
-        "      }" +
-        "    }" +
-        "  }," +
-        "  'nodeValues': {" +
-        "    'node1:8983': {" +
-        "      'cores': 4," +
-        "      'freedisk': 334," +
-        "      'port': 8983" +
-        "    }," +
-        "    'node2:8984': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1000," +
-        "      'port': 8984" +
-        "    }," +
-        "    'node3:8985': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1500," +
-        "      'port': 8985" +
-        "    }" +
-        "  }" +
-        "}";
     AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) Utils.fromJSONString(dataproviderdata))).getViolations();
+    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testViolationOutput.json"))).getViolations();
     StringWriter writer = new StringWriter();
     NamedList<Object> val = new NamedList<>();
     val.add("violations", violations);
@@ -3003,47 +2517,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
 
   public void testFreediskPercentage() {
-    String dataproviderdata = "{" +
-        "  'liveNodes': [" +
-        "    'node1:8983'," +
-        "    'node2:8984'," +
-        "    'node3:8985'" +
-        "  ]," +
-        "  'replicaInfo': {" +
-        "    'node1:8983': {" +
-        "      'c1': {" +
-        "        's1': [" +
-        "          {'r1': {'type': 'NRT'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]," +
-        "        's2': [" +
-        "          {'r1': {'type': 'NRT'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]" +
-        "      }" +
-        "    }" +
-        "  }," +
-        "  'nodeValues': {" +
-        "    'node1:8983': {" +
-        "      'cores': 4," +
-        "      'freedisk': 230," +
-        "      'totaldisk': 800," +
-        "      'port': 8983" +
-        "    }," +
-        "    'node2:8984': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1000," +
-        "      'totaldisk': 1200," +
-        "      'port': 8984" +
-        "    }," +
-        "    'node3:8985': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1500," +
-        "      'totaldisk': 1700," +
-        "      'port': 8985" +
-        "    }" +
-        "  }" +
-        "}";
+
     String autoScalingjson = "{" +
         "  'cluster-preferences': [" +
         "    { 'maximize': 'freedisk', 'precision': 50}," +
@@ -3054,7 +2528,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "  ]" +
         "}";
     AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) Utils.fromJSONString(dataproviderdata))).getViolations();
+    List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testFreediskPercentage.json"))).getViolations();
     assertEquals(1, violations.size());
     assertEquals(4, violations.get(0).getViolatingReplicas().size());
     for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
@@ -3070,7 +2544,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
         "  ]" +
         "}";
     cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) Utils.fromJSONString(dataproviderdata))).getViolations();
+    violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testFreediskPercentage.json"))).getViolations();
     assertEquals(1, violations.size());
     assertEquals(-4d, violations.get(0).replicaCountDelta, 0.01);
     for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
@@ -3080,88 +2554,8 @@ public class TestPolicy extends SolrTestCaseJ4 {
   }
 
   public void testAutoscalingPreferencesUsedWithNoPolicy() throws IOException, InterruptedException {
-    String dataproviderdata = "{" +
-        "  'liveNodes': [" +
-        "    'node1:8983'," +
-        "    'node2:8984'," +
-        "    'node3:8985'" +
-        "  ]," +
-        "  'replicaInfo': {" +
-        "    'node1:8983': {" +
-        "      'c1': {" +
-        "        's1': [" +
-        "          {'r1': {'type': 'NRT', 'INDEX.sizeInGB':'1100'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]," +
-        "        's2': [" +
-        "          {'r1': {'type': 'NRT', 'INDEX.sizeInGB':'1100'}}," +
-        "          {'r2': {'type': 'NRT'}}" +
-        "        ]" +
-        "      }" +
-        "    }" +
-        "  }," +
-        "  'nodeValues': {" +
-        "    'node1:8983': {" +
-        "      'cores': 4," +
-        "      'freedisk': 300," +
-        "      'totaldisk': 4700," +
-        "      'port': 8983" +
-        "    }," +
-        "    'node2:8984': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1000," +
-        "      'totaldisk': 1200," +
-        "      'port': 8984" +
-        "    }," +
-        "    'node3:8985': {" +
-        "      'cores': 0," +
-        "      'freedisk': 1651," +
-        "      'totaldisk': 1700," +
-        "      'port': 8985" +
-        "    }" +
-        "  }" +
-        "}";
-
-    String clusterState = "{\n" +
-        "  \"c1\" : {\n" +
-        "    \"router\":{\"name\":\"compositeId\"},\n" +
-        "    \"maxShardsPerNode\":-1,\n" +
-        "    \"shards\" : {\n" +
-        "      \"s1\" :  {\n" +
-        "        \"replicas\" : {\n" +
-        "          \"r1\" : {\n" +
-        "            \"type\" : \"NRT\",\n" +
-        "            \"node_name\" : \"node1:8983\",\n" +
-        "            \"state\" : \"active\",\n" +
-        "            \"leader\" : \"true\"\n" +
-        "          },\n" +
-        "          \"r2\" : {\n" +
-        "            \"type\" : \"NRT\",\n" +
-        "            \"node_name\" : \"node1:8983\",\n" +
-        "            \"state\" : \"active\"\n" +
-        "          }\n" +
-        "        }\n" +
-        "      },\n" +
-        "      \"s2\" : {\n" +
-        "        \"replicas\" : {\n" +
-        "          \"r1\" : {\n" +
-        "            \"type\" : \"NRT\",\n" +
-        "            \"node_name\" : \"node1:8983\",\n" +
-        "            \"state\" : \"active\",\n" +
-        "            \"leader\" : \"true\"\n" +
-        "          },\n" +
-        "          \"r2\" : {\n" +
-        "            \"type\" : \"NRT\",\n" +
-        "            \"node_name\" : \"node1:8983\",\n" +
-        "            \"state\" : \"active\"\n" +
-        "          }\n" +
-        "        }\n" +
-        "      }\n" +
-        "    }\n" +
-        "  }\n" +
-        "}";
-
-    Map m = (Map) Utils.fromJSONString(dataproviderdata);
+    Map m = (Map) loadFromResource("testAutoscalingPreferencesUsedWithNoPolicy.json");
+    Map clusterState = (Map) m.remove("clusterstate");
 
     Map replicaInfo = (Map) m.get("replicaInfo");
     replicaInfo.forEach((node, val) -> {
@@ -3206,7 +2600,7 @@ public class TestPolicy extends SolrTestCaseJ4 {
 
           @Override
           public ClusterState getClusterState() throws IOException {
-            return ClusterState.load(0, clusterState.getBytes(Charset.forName("UTF-8")), getLiveNodes(), ZkStateReader.getCollectionPath("c1"));
+            return ClusterState.load(0, clusterState, getLiveNodes(), ZkStateReader.getCollectionPath("c1"));
           }
         };
       }
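
A note on the recurring pattern in the TestPolicy hunks above: each inline cluster-state string is moved into a JSON file under the test resources and parsed into a Map, which is then handed to the Map-based ClusterState.load(...) overload; CLUSTER_STATE stands in for the "/clusterstate.json" path that was previously hard-coded. A minimal sketch of the converted setup, using names taken from the diff (the resource name is just one of the files referenced above):

    // Sketch only, not part of the patch: post-refactor test setup in TestPolicy.
    // The cluster state now comes from a classpath JSON resource instead of an inline string.
    ClusterState clusterState = ClusterState.load(1,
        (Map) loadFromResource("testWithCollection.json"),
        ImmutableSet.of("node1", "node2", "node3", "node4", "node5"), CLUSTER_STATE);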

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f1a30bfb/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
index afc5540..c2ce4fa 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy2.java
@@ -54,82 +54,12 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   public void testEqualOnNonNode() {
-    String state = "{" +
-        "  'coll1': {" +
-        "    'router': {" +
-        "      'name': 'compositeId'" +
-        "    }," +
-        "    'shards': {" +
-        "      'shard1': {" +
-        "        'range': '80000000-ffffffff'," +
-        "        'replicas': {" +
-        "          'r1': {" +//east
-        "            'core': 'r1'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'" +
-        "          }," +
-        "          'r2': {" +//west
-        "            'core': 'r2'," +
-        "            'base_url': 'http://10.0.0.4:7574/solr'," +
-        "            'node_name': 'node2'," +
-        "            'state': 'active'" +
-        "          }" +
-        "        }" +
-        "      }," +
-        "      'shard2': {" +
-        "        'range': '0-7fffffff'," +
-        "        'replicas': {" +
-        "          'r3': {" +//east
-        "            'core': 'r3'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'" +
-        "          }," +
-        "          'r4': {" +//west
-        "            'core': 'r4'," +
-        "            'base_url': 'http://10.0.0.4:8987/solr'," +
-        "            'node_name': 'node4'," +
-        "            'state': 'active'" +
-        "          }," +
-        "          'r6': {" +//east
-        "            'core': 'r6'," +
-        "            'base_url': 'http://10.0.0.4:8989/solr'," +
-        "            'node_name': 'node3'," +
-        "            'state': 'active'" +
-        "          }," +
-        "          'r5': {" +//east
-        "            'core': 'r5'," +
-        "            'base_url': 'http://10.0.0.4:8983/solr'," +
-        "            'node_name': 'node1'," +
-        "            'state': 'active'" +
-        "          }" +
-        "        }" +
-        "      }" +
-        "    }" +
-        "  }" +
-        "}";
-    String metaData =
-        "  {'nodeValues':{" +
-            "    'node1':{'cores' : 3, 'freedisk' : 700, 'totaldisk' :1000, 'sysprop.zone' : 'east'}," +
-            "    'node2':{'cores' : 1, 'freedisk' : 900, 'totaldisk' :1000, 'sysprop.zone' : 'west'}," +
-            "    'node3':{'cores' : 1, 'freedisk' : 900, 'totaldisk' :1000, 'sysprop.zone': 'east'}," +
-            "    'node4':{'cores' : 1, 'freedisk' : 900, 'totaldisk' :1000, 'sysprop.zone': 'west'}," +
-            "    'node5':{'cores' : 0, 'freedisk' : 1000, 'totaldisk' :1000, 'sysprop.zone': 'west'}" +
-            "  }," +
-            "  'replicaValues':[" +
-            "    {'INDEX.sizeInGB': 100, core : r1}," +
-            "    {'INDEX.sizeInGB': 100, core : r2}," +
-            "    {'INDEX.sizeInGB': 100, core : r3}," +
-            "    {'INDEX.sizeInGB': 100, core : r4}," +
-            "    {'INDEX.sizeInGB': 100, core : r5}," +
-            "    {'INDEX.sizeInGB': 100, core : r6}]}";
-
+    List<Map> l = (List<Map>) loadFromResource("testEqualOnNonNode.json");
     String autoScalingjson = "{cluster-policy:[" +
         "    { replica : '<3' , shard : '#EACH', sysprop.zone: [east,west] } ]," +
         "  'cluster-preferences':[{ minimize : cores},{maximize : freedisk, precision : 50}]}";
     Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    Policy.Session session = policy.createSession(createCloudManager(state, metaData));
+    Policy.Session session = policy.createSession(createCloudManager(l.get(0), l.get(1)));
     List<Violation> violations = session.getViolations();
     assertEquals(1, violations.size());
     assertEquals(3, violations.get(0).getViolatingReplicas().size());
@@ -138,11 +68,12 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
       assertEquals("shard2", r.replicaInfo.getShard());
     }
 
+    l = (List<Map>) loadFromResource("testEqualOnNonNode.json");
     autoScalingjson = "{cluster-policy:[" +
         "    { replica : '<3' , shard : '#EACH', sysprop.zone: '#EACH' } ]," +
         "  'cluster-preferences':[{ minimize : cores},{maximize : freedisk, precision : 50}]}";
     policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    session = policy.createSession(createCloudManager(state, metaData));
+    session = policy.createSession(createCloudManager(l.get(0), l.get(1)));
     violations = session.getViolations();
     assertEquals(1, violations.size());
     assertEquals(3, violations.get(0).getViolatingReplicas().size());
@@ -150,11 +81,12 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     for (Violation.ReplicaInfoAndErr r : violations.get(0).getViolatingReplicas()) {
       assertEquals("shard2", r.replicaInfo.getShard());
     }
+    l = (List<Map>) loadFromResource("testEqualOnNonNode.json");
     autoScalingjson = "{cluster-policy:[" +
         "    { replica : '#EQUAL' , node: '#ANY' } ]," +
         "  'cluster-preferences':[{ minimize : cores},{maximize : freedisk, precision : 50}]}";
     policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    session = policy.createSession(createCloudManager(state, metaData));
+    session = policy.createSession(createCloudManager(l.get(0), l.get(1)));
     violations = session.getViolations();
     List<Suggester.SuggestionInfo> suggestions = null;
     assertEquals(2, violations.size());
@@ -169,8 +101,9 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
         fail();
       }
     }
+    l = (List<Map>) loadFromResource("testEqualOnNonNode.json");
     suggestions = PolicyHelper.getSuggestions(new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson))
-        , createCloudManager(state, metaData));
+        , createCloudManager(l.get(0), l.get(1)));
     assertEquals(1, suggestions.size());
     String repName = (String) suggestions.get(0)._get("operation/command/move-replica/replica", null);
 
@@ -182,11 +115,12 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
     });
     assertTrue(found.get());
 
+    l = (List<Map>) loadFromResource("testEqualOnNonNode.json");
     autoScalingjson = "{cluster-policy:[" +
         "    { cores : '#EQUAL' , node: '#ANY' } ]," +
         "  'cluster-preferences':[{ minimize : cores},{minimize : freedisk, precision : 50}]}";
     policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
-    session = policy.createSession(createCloudManager(state, metaData));
+    session = policy.createSession(createCloudManager(l.get(0), l.get(1)));
     violations = session.getViolations();
     assertEquals(2, violations.size());
     for (Violation violation : violations) {
@@ -201,9 +135,9 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
       }
 
     }
-
+    l = (List<Map>) loadFromResource("testEqualOnNonNode.json");
     suggestions = PolicyHelper.getSuggestions(new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson)),
-        createCloudManager(state, metaData));
+        createCloudManager(l.get(0), l.get(1)));
     assertEquals(1, suggestions.size());
     assertEquals("node5", suggestions.get(0)._get("operation/command/move-replica/targetNode", null));
 
@@ -219,9 +153,7 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
 
   }
 
-  static SolrCloudManager createCloudManager(String clusterStateStr, String metadata) {
-    Map m = (Map) Utils.fromJSONString(clusterStateStr);
-    Map meta = (Map) Utils.fromJSONString(metadata);
+  static SolrCloudManager createCloudManager(Map m, Map meta) {
     Map nodeVals = (Map) meta.get("nodeValues");
     List<Map> replicaVals = (List<Map>) meta.get("replicaValues");
     ClusterState clusterState = ClusterState.load(0, m, Collections.emptySet(), null);
@@ -467,9 +399,11 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
 
   }
 
-  public static Object loadFromResource(String file) throws IOException {
+  public static Object loadFromResource(String file)  {
     try (InputStream is = TestPolicy2.class.getResourceAsStream("/solrj/solr/autoscaling/" + file)) {
       return Utils.fromJSON(is);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
     }
   }
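
The loadFromResource helper above is what the converted tests now call; it resolves the file under /solrj/solr/autoscaling/ on the test classpath and parses it with Utils.fromJSON. A minimal usage sketch, mirroring the calls introduced in TestPolicy earlier in this patch (policy and cloudManagerWithData are the existing test helpers there):

    // Sketch only: build a policy session from a JSON test resource,
    // as the converted TestPolicy methods do.
    Map data = (Map) loadFromResource("testMoveReplicaSuggester.json");
    Policy.Session session = policy.createSession(cloudManagerWithData(data));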
 


[33/50] [abbrv] lucene-solr:jira/http2: SOLR-12028: BadApple and AwaitsFix annotations usage

Posted by da...@apache.org.
SOLR-12028: BadApple and AwaitsFix annotations usage


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6c5df58b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6c5df58b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6c5df58b

Branch: refs/heads/jira/http2
Commit: 6c5df58b5a87c44626427dbf7968dfdd284b5eba
Parents: e14bacf
Author: Erick Erickson <Er...@gmail.com>
Authored: Sun Oct 14 21:13:33 2018 -0400
Committer: Erick Erickson <Er...@gmail.com>
Committed: Sun Oct 14 21:13:33 2018 -0400

----------------------------------------------------------------------
 .../org/apache/lucene/index/TestIndexWriterOnVMError.java     | 1 +
 .../ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java  | 1 +
 .../test/org/apache/solr/cloud/BasicDistributedZkTest.java    | 1 +
 .../src/test/org/apache/solr/cloud/DeleteReplicaTest.java     | 1 +
 .../org/apache/solr/cloud/LegacyCloudClusterPropTest.java     | 2 +-
 .../src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java   | 3 ++-
 .../src/test/org/apache/solr/cloud/OverseerRolesTest.java     | 2 +-
 .../test/org/apache/solr/cloud/RestartWhileUpdatingTest.java  | 2 +-
 .../src/test/org/apache/solr/cloud/TestCloudRecovery.java     | 1 +
 .../src/test/org/apache/solr/cloud/TestWithCollection.java    | 4 ++--
 .../api/collections/CollectionsAPIAsyncDistributedZkTest.java | 1 +
 .../org/apache/solr/cloud/api/collections/ShardSplitTest.java | 1 +
 .../cloud/api/collections/TestLocalFSCloudBackupRestore.java  | 2 +-
 .../apache/solr/cloud/autoscaling/ComputePlanActionTest.java  | 3 +++
 .../solr/cloud/autoscaling/MetricTriggerIntegrationTest.java  | 1 +
 .../cloud/autoscaling/ScheduledTriggerIntegrationTest.java    | 1 +
 .../solr/cloud/autoscaling/sim/TestSimComputePlanAction.java  | 2 ++
 .../solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java  | 1 +
 .../cloud/autoscaling/sim/TestSimGenericDistributedQueue.java | 1 +
 .../solr/cloud/autoscaling/sim/TestSimLargeCluster.java       | 2 ++
 .../apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java | 2 +-
 .../solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java | 5 +++++
 .../test/org/apache/solr/cloud/cdcr/CdcrBootstrapTest.java    | 1 +
 .../org/apache/solr/cloud/cdcr/CdcrOpsAndBoundariesTest.java  | 1 +
 .../org/apache/solr/cloud/cdcr/CdcrWithNodesRestartsTest.java | 1 +
 .../apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java   | 3 +--
 .../solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java   | 3 +--
 .../apache/solr/handler/admin/MetricsHistoryHandlerTest.java  | 2 +-
 .../solr/handler/component/CustomHighlightComponentTest.java  | 1 +
 .../component/TestDistributedStatsComponentCardinality.java   | 2 +-
 .../solr/metrics/reporters/SolrJmxReporterCloudTest.java      | 3 +--
 .../src/test/org/apache/solr/search/stats/TestDistribIDF.java | 1 +
 .../apache/solr/uninverting/TestDocTermOrdsUninvertLimit.java | 1 +
 .../update/processor/TimeRoutedAliasUpdateProcessorTest.java  | 1 +
 .../client/solrj/embedded/LargeVolumeBinaryJettyTest.java     | 2 ++
 .../solr/client/solrj/embedded/LargeVolumeJettyTest.java      | 2 ++
 .../solr/client/solrj/io/stream/StreamDecoratorTest.java      | 3 ++-
 .../apache/solr/common/cloud/TestCollectionStateWatchers.java | 7 +++----
 38 files changed, 53 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
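
For context on the mechanical change repeated in the hunks below: frequently failing tests either gain a @BadApple annotation pointing at SOLR-12028 (dated 14-Oct-2018) or have a previously added one commented out. A minimal sketch of the annotation as applied (the test method name is illustrative only):

    // Sketch of the pattern used throughout this commit: @BadApple flags a
    // frequently failing test so normal runs can skip it until the linked issue is fixed.
    @Test
    @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
    public void testSomething() throws Exception {
      // ...
    }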


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnVMError.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnVMError.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnVMError.java
index 30e6de5..1913b5c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnVMError.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnVMError.java
@@ -275,6 +275,7 @@ public class TestIndexWriterOnVMError extends LuceneTestCase {
   }
   
   @Nightly
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testCheckpoint() throws Exception {
     final Random r = new Random(random().nextLong());
     doTest(new Failure() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java
index b153a71..65e0e7f 100644
--- a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java
+++ b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java
@@ -74,6 +74,7 @@ public class TestLTROnSolrCloud extends TestRerankBase {
 
   @Test
   // commented 4-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testSimpleQuery() throws Exception {
     // will randomly pick a configuration with [1..5] shards and [1..3] replicas
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index ea12c83..ccc6528 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -150,6 +150,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
   @Test
   @ShardsFixed(num = 4)
   //DO NOT ENABLE @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void test() throws Exception {
     // setLoggingLevel(null);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index 5d10824..a184997 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -168,6 +168,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
 
   @Test
   //commented 2-Aug-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 28-June-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void deleteReplicaFromClusterState() throws Exception {
     deleteReplicaFromClusterState("true");
     deleteReplicaFromClusterState("false");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/LegacyCloudClusterPropTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/LegacyCloudClusterPropTest.java b/solr/core/src/test/org/apache/solr/cloud/LegacyCloudClusterPropTest.java
index cdeb319..c26c31b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LegacyCloudClusterPropTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LegacyCloudClusterPropTest.java
@@ -66,7 +66,7 @@ public class LegacyCloudClusterPropTest extends SolrCloudTestCase {
 
   @Test
   //2018-06-18 (commented) @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
   public void testCreateCollectionSwitchLegacyCloud() throws Exception {
     createAndTest("legacyTrue", true);
     createAndTest("legacyFalse", false);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
index 61a5530..4308d8a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
@@ -70,7 +70,8 @@ public class MoveReplicaHDFSTest extends MoveReplicaTest {
   //commented 23-AUG-2018  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 17-Aug-2018
   // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
   //commented 20-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNormalFailedMove() throws Exception {
     inPlaceMove = false;
     testFailedMove();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
index 83569eb..5fa64a9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
@@ -86,7 +86,7 @@ public class OverseerRolesTest extends SolrCloudTestCase {
 
   @Test
   //commented 2-Aug-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 04-May-2018
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
   public void testOverseerRole() throws Exception {
 
     logOverseerState();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/RestartWhileUpdatingTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/RestartWhileUpdatingTest.java b/solr/core/src/test/org/apache/solr/cloud/RestartWhileUpdatingTest.java
index 497cd00..75f4266 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RestartWhileUpdatingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RestartWhileUpdatingTest.java
@@ -76,7 +76,7 @@ public class RestartWhileUpdatingTest extends AbstractFullDistribZkTestBase {
   }
 
   @Test
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
   public void test() throws Exception {
     handle.clear();
     handle.put("timestamp", SKIPVAL);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
index 7f822a3..eb8a92e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
@@ -145,6 +145,7 @@ public class TestCloudRecovery extends SolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void corruptedLogTest() throws Exception {
     AtomicInteger countReplayLog = new AtomicInteger(0);
     DirectUpdateHandler2.commitOnClose = false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java b/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
index b23d035..52e659a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
@@ -176,7 +176,7 @@ public class TestWithCollection extends SolrCloudTestCase {
   }
 
   @Test
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
   public void testDeleteWithCollection() throws IOException, SolrServerException, InterruptedException {
     String prefix = "testDeleteWithCollection";
     String xyz = prefix + "_xyz";
@@ -351,7 +351,7 @@ public class TestWithCollection extends SolrCloudTestCase {
   }
 
   @Test
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
   public void testMoveReplicaWithCollection() throws Exception {
     String prefix = "testMoveReplicaWithCollection";
     String xyz = prefix + "_xyz";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
index 00113a9..7e939a0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -192,6 +192,7 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
     assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
   }
   // commented 4-Sep-2018  @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testAsyncIdRaceCondition() throws Exception {
     SolrClient[] clients = new SolrClient[cluster.getJettySolrRunners().size()];
     int j = 0;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index abd887c..0b474e5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -337,6 +337,7 @@ public class ShardSplitTest extends AbstractFullDistribZkTestBase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testSplitMixedReplicaTypesLink() throws Exception {
     doSplitMixedReplicaTypes(SolrIndexSplitter.SplitMethod.LINK);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
index 34c6e3d..83a6947 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
@@ -59,7 +59,7 @@ public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTes
 
   @Override
   @Test
-  @BadApple(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
   public void test() throws Exception {
     super.test();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
index 087094a..99eca6c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ComputePlanActionTest.java
@@ -325,6 +325,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeAdded() throws Exception {
     CloudSolrClient solrClient = cluster.getSolrClient();
     String setTriggerCommand = "{" +
@@ -516,6 +517,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeAddedTriggerWithAddReplicaPreferredOp_2Shard() throws Exception {
     String collectionNamePrefix = "testNodeAddedTriggerWithAddReplicaPreferredOp_2Shard";
     int numShards = 2;
@@ -613,6 +615,7 @@ public class ComputePlanActionTest extends SolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeLostTriggerWithDeleteNodePreferredOp() throws Exception {
     String collectionNamePrefix = "testNodeLostTriggerWithDeleteNodePreferredOp";
     int numCollections = 1 + random().nextInt(3), numShards = 1 + random().nextInt(3);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/MetricTriggerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/MetricTriggerIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/MetricTriggerIntegrationTest.java
index 81cac33..a9aac97 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/MetricTriggerIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/MetricTriggerIntegrationTest.java
@@ -82,6 +82,7 @@ public class MetricTriggerIntegrationTest extends SolrCloudTestCase {
 
   @Test
   // commented 4-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testMetricTrigger() throws Exception {
     cluster.waitForAllNodes(5);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerIntegrationTest.java
index 8d1fcda..ff0223b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/ScheduledTriggerIntegrationTest.java
@@ -73,6 +73,7 @@ public class ScheduledTriggerIntegrationTest extends SolrCloudTestCase {
 
   @Test
   // commented 15-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testScheduledTrigger() throws Exception {
     CloudSolrClient solrClient = cluster.getSolrClient();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
index 98a7728..b849c97 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimComputePlanAction.java
@@ -122,6 +122,7 @@ public class TestSimComputePlanAction extends SimSolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeLost() throws Exception  {
     // let's start a node so that we have at least two
     String node = cluster.simAddNode();
@@ -254,6 +255,7 @@ public class TestSimComputePlanAction extends SimSolrCloudTestCase {
 
   @Test
   //17-Aug-2018 commented @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 28-June-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeAdded() throws Exception {
     AssertingTriggerAction.expectedNode = null;
     SolrClient solrClient = cluster.simGetSolrClient();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
index ea753bc..ab228d5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimExecutePlanAction.java
@@ -156,6 +156,7 @@ public class TestSimExecutePlanAction extends SimSolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testIntegration() throws Exception  {
     SolrClient solrClient = cluster.simGetSolrClient();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimGenericDistributedQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimGenericDistributedQueue.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimGenericDistributedQueue.java
index 436542a..89f3366 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimGenericDistributedQueue.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimGenericDistributedQueue.java
@@ -32,6 +32,7 @@ public class TestSimGenericDistributedQueue extends TestSimDistributedQueue {
   }
 
   // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testDistributedQueue() throws Exception {
     super.testDistributedQueue();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimLargeCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimLargeCluster.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimLargeCluster.java
index 42ddcc1..5793f92 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimLargeCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimLargeCluster.java
@@ -390,6 +390,7 @@ public class TestSimLargeCluster extends SimSolrCloudTestCase {
 
   @Test
   // commented 4-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2018-06-18
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeLost() throws Exception {
     doTestNodeLost(waitForSeconds, 5000, 0);
   }
@@ -634,6 +635,7 @@ public class TestSimLargeCluster extends SimSolrCloudTestCase {
 
   @Test
   //commented 2-Aug-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2018-06-18
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testSearchRate() throws Exception {
     SolrClient solrClient = cluster.simGetSolrClient();
     String collectionName = "testSearchRate";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
index 379011d..b9dbebb 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimPolicyCloud.java
@@ -290,7 +290,7 @@ public class TestSimPolicyCloud extends SimSolrCloudTestCase {
     assertEquals(3, coll.getSlice("s3").getReplicas().size());
     coll.forEachReplica(verifyReplicas);
   }
-
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testCreateCollectionAddShardUsingPolicy() throws Exception {
     SolrClient solrClient = cluster.simGetSolrClient();
     String nodeId = cluster.getSimClusterStateProvider().simGetRandomNode();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
index 36beeae..d8cdcc2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestSimTriggerIntegration.java
@@ -290,6 +290,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
   @Test
   // commented 20-July-2018  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
   // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeLostTriggerRestoreState() throws Exception {
     // for this test we want to update the trigger so we must assert that the actions were created twice
     TestSimTriggerIntegration.actionInitCalled = new CountDownLatch(2);
@@ -464,6 +465,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
 
   @Test
   // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 26-Mar-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testNodeLostTrigger() throws Exception {
     SolrClient solrClient = cluster.simGetSolrClient();
     String setTriggerCommand = "{" +
@@ -638,6 +640,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
 
   @Test
   // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 16-Apr-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testEventQueue() throws Exception {
     waitForSeconds = 1;
     SolrClient solrClient = cluster.simGetSolrClient();
@@ -694,6 +697,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
 
   @Test
   // commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") //2018-03-10
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testEventFromRestoredState() throws Exception {
     SolrClient solrClient = cluster.simGetSolrClient();
     String setTriggerCommand = "{" +
@@ -1215,6 +1219,7 @@ public class TestSimTriggerIntegration extends SimSolrCloudTestCase {
 
   @Test
   //@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testSearchRate() throws Exception {
     SolrClient solrClient = cluster.simGetSolrClient();
     String COLL1 = "collection1";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrBootstrapTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrBootstrapTest.java b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrBootstrapTest.java
index 4ba32a0..8472ff9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrBootstrapTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrBootstrapTest.java
@@ -58,6 +58,7 @@ public class CdcrBootstrapTest extends SolrTestCaseJ4 {
    */
   @Test
   // commented 4-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testConvertClusterToCdcrAndBootstrap() throws Exception {
     // start the target first so that we know its zkhost
     MiniSolrCloudCluster target = new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrOpsAndBoundariesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrOpsAndBoundariesTest.java b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrOpsAndBoundariesTest.java
index 53942bb..957c1a4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrOpsAndBoundariesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrOpsAndBoundariesTest.java
@@ -63,6 +63,7 @@ public class CdcrOpsAndBoundariesTest extends SolrTestCaseJ4 {
    * Check the ops statistics.
    */
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testOps() throws Exception {
     createCollections();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrWithNodesRestartsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrWithNodesRestartsTest.java b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrWithNodesRestartsTest.java
index c8d5a32..7a22761 100644
--- a/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrWithNodesRestartsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/cdcr/CdcrWithNodesRestartsTest.java
@@ -195,6 +195,7 @@ public class CdcrWithNodesRestartsTest extends SolrTestCaseJ4 {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testReplicationAfterLeaderChange() throws Exception {
     createCollections();
     CdcrTestsUtil.cdcrStart(sourceSolrClient);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
index 6b5fa3c..117ec61 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
@@ -19,7 +19,6 @@ package org.apache.solr.cloud.hdfs;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.cloud.UnloadDistributedZkTest;
 import org.apache.solr.util.BadHdfsThreadsFilter;
@@ -34,7 +33,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
+//Commented  4-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
 public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
   private static MiniDFSCluster dfsCluster;
   

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
index 6acc267..1905874 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
@@ -31,7 +31,6 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.NRTCachingDirectory;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Nightly;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
@@ -58,7 +57,7 @@ import org.junit.Test;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
+//Commented  4-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
   private static final String SOLR_HDFS_HOME = "solr.hdfs.home";
   private static final String SOLR_HDFS_BLOCKCACHE_GLOBAL = "solr.hdfs.blockcache.global";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java
index 79bded9..c4ca537 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHistoryHandlerTest.java
@@ -110,7 +110,7 @@ public class MetricsHistoryHandlerTest extends SolrCloudTestCase {
   }
 
   @Test
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 15-Sep-2018
+  //Commented 14-Oct-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 15-Sep-2018
   public void testBasic() throws Exception {
     timeSource.sleep(10000);
     List<Pair<String, Long>> list = handler.getFactory().list(100);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
index da7ea57..69edab8 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
@@ -126,6 +126,7 @@ public class CustomHighlightComponentTest extends SolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void test() throws Exception {
 
     // determine custom search handler name (the exact name should not matter)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/handler/component/TestDistributedStatsComponentCardinality.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TestDistributedStatsComponentCardinality.java b/solr/core/src/test/org/apache/solr/handler/component/TestDistributedStatsComponentCardinality.java
index 8a5f4b1..f226741 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TestDistributedStatsComponentCardinality.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TestDistributedStatsComponentCardinality.java
@@ -101,7 +101,7 @@ public class TestDistributedStatsComponentCardinality extends BaseDistributedSea
     
   }
 
-
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void test() throws Exception {
     buildIndex();
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterCloudTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterCloudTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterCloudTest.java
index ffe9834..b94ff8a 100644
--- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterCloudTest.java
+++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrJmxReporterCloudTest.java
@@ -26,7 +26,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -63,7 +62,7 @@ public class SolrJmxReporterCloudTest extends SolrCloudTestCase {
   }
 
   @Test
-  @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  //Commented 14-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
   public void testJmxReporter() throws Exception {
     CollectionAdminRequest.reloadCollection(COLLECTION).processAndWait(cluster.getSolrClient(), 60);
     CloudSolrClient solrClient = cluster.getSolrClient();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/search/stats/TestDistribIDF.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/stats/TestDistribIDF.java b/solr/core/src/test/org/apache/solr/search/stats/TestDistribIDF.java
index be5ede0..0cc8601 100644
--- a/solr/core/src/test/org/apache/solr/search/stats/TestDistribIDF.java
+++ b/solr/core/src/test/org/apache/solr/search/stats/TestDistribIDF.java
@@ -142,6 +142,7 @@ public class TestDistribIDF extends SolrTestCaseJ4 {
 
   @Test
 // commented 4-Sep-2018   @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testMultiCollectionQuery() throws Exception {
     // collection1 and collection2 are collections which have distributed idf enabled
     // collection1_local and collection2_local don't have distributed idf available

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrdsUninvertLimit.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrdsUninvertLimit.java b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrdsUninvertLimit.java
index 0caad54..4868d46 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrdsUninvertLimit.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrdsUninvertLimit.java
@@ -39,6 +39,7 @@ public class TestDocTermOrdsUninvertLimit extends LuceneTestCase {
   @SuppressWarnings({"ConstantConditions", "PointlessBooleanExpression"})
   @Nightly
 // commented 4-Sep-2018   @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testTriggerUnInvertLimit() throws IOException {
     final boolean SHOULD_TRIGGER = false; // Set this to true to use the test with the old implementation
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
index 5c9fc94..640eeed 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
@@ -115,6 +115,7 @@ public class TimeRoutedAliasUpdateProcessorTest extends SolrCloudTestCase {
   @Slow
   @Test
   @LogLevel("org.apache.solr.update.processor.TimeRoutedAlias=DEBUG;org.apache.solr.cloud=DEBUG")
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void test() throws Exception {
     String configName = TimeRoutedAliasUpdateProcessorTest.configName + getTestName();
     createConfigSet(configName);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeBinaryJettyTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeBinaryJettyTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeBinaryJettyTest.java
index 67ea49c..fc28449 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeBinaryJettyTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeBinaryJettyTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.solr.client.solrj.embedded;
 
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.LargeVolumeTestBase;
 import org.junit.BeforeClass;
 
@@ -23,6 +24,7 @@ import org.junit.BeforeClass;
  * @see org.apache.solr.client.solrj.impl.BinaryRequestWriter
  * @see org.apache.solr.client.solrj.request.JavaBinUpdateRequestCodec
  */
+@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
 public class LargeVolumeBinaryJettyTest extends LargeVolumeTestBase {
   @BeforeClass
   public static void beforeTest() throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeJettyTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeJettyTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeJettyTest.java
index ed6ab1f..02764fb 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeJettyTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/LargeVolumeJettyTest.java
@@ -16,10 +16,12 @@
  */
 package org.apache.solr.client.solrj.embedded;
 
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.LargeVolumeTestBase;
 import org.junit.BeforeClass;
 
 // commented 4-Sep-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
 public class LargeVolumeJettyTest extends LargeVolumeTestBase {
   @BeforeClass
   public static void beforeTest() throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
index ef5729d..0a8030c 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
@@ -1565,6 +1565,7 @@ public class StreamDecoratorTest extends SolrCloudTestCase {
   }
 
   @Test
+  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 14-Oct-2018
   public void testParallelRollupStream() throws Exception {
 
     new UpdateRequest()
@@ -3296,7 +3297,7 @@ public class StreamDecoratorTest extends SolrCloudTestCase {
   }
 
   @Test
-  @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
+  //Commented 14-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
   public void testClassifyStream() throws Exception {
     Assume.assumeTrue(!useAlias);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c5df58b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
index c08c5b7..51ee814 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
@@ -26,7 +26,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -109,7 +108,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
   }
 
   @Test
-  @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
+  //Commented 14-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
   public void testSimpleCollectionWatch() throws Exception {
 
     CloudSolrClient client = cluster.getSolrClient();
@@ -224,7 +223,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
   }
 
   @Test
-  @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
+  //Commented 14-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
   public void testWaitForStateWatcherIsRetainedOnPredicateFailure() throws Exception {
 
     CloudSolrClient client = cluster.getSolrClient();
@@ -286,7 +285,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
   }
 
   @Test
-  @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
+  //Commented 14-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
   public void testWatchesWorkForStateFormat1() throws Exception {
 
     final CloudSolrClient client = cluster.getSolrClient();

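Every hunk above toggles the same marker: the BadApple annotation defined in LuceneTestCase, which flags a test as a known intermittent failure and records the tracking issue in its bugUrl element so that builds can choose to skip such tests (typically via the tests.badapples property). A minimal usage sketch follows; it is not taken from the patch, the class and method names are invented, and only the annotation usage mirrors the hunks above:

import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;

// Illustrative only: ExampleFlakyTest and testSometimesFlaky are made-up names.
public class ExampleFlakyTest extends LuceneTestCase {

  @Test
  @BadApple(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12028") // known flaky, tracked in JIRA
  public void testSometimesFlaky() throws Exception {
    // test body elided; the annotation alone is what marks this test as a "bad apple"
  }
}

Commenting the annotation back out, as several hunks above do, simply returns the test to normal runs.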

[30/50] [abbrv] lucene-solr:jira/http2: SOLR-12780: Add support for Leaky ReLU and TanH activations in contrib/ltr NeuralNetworkModel class. (Kamuela Lau, Christine Poerschke)

Posted by da...@apache.org.
SOLR-12780: Add support for Leaky ReLU and TanH activations in contrib/ltr NeuralNetworkModel class.
(Kamuela Lau, Christine Poerschke)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9c8ffabf
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9c8ffabf
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9c8ffabf

Branch: refs/heads/jira/http2
Commit: 9c8ffabfe3ed75768e770f9f3a0171f77fac0e1e
Parents: 42ac07d
Author: Christine Poerschke <cp...@apache.org>
Authored: Fri Oct 12 17:08:35 2018 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Fri Oct 12 17:08:35 2018 +0100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  3 ++
 .../solr/ltr/model/NeuralNetworkModel.java      | 34 ++++++++++++++++++--
 2 files changed, 34 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c8ffabf/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 628abb8..3a58a68 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -144,6 +144,9 @@ New Features
 
 * SOLR-12843: Implement a MultiContentWriter in SolrJ to post multiple files/payload at once (noble)
 
+* SOLR-12780: Add support for Leaky ReLU and TanH activations in contrib/ltr NeuralNetworkModel class.
+  (Kamuela Lau, Christine Poerschke)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9c8ffabf/solr/contrib/ltr/src/java/org/apache/solr/ltr/model/NeuralNetworkModel.java
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/src/java/org/apache/solr/ltr/model/NeuralNetworkModel.java b/solr/contrib/ltr/src/java/org/apache/solr/ltr/model/NeuralNetworkModel.java
index 798b81c..fa92374 100644
--- a/solr/contrib/ltr/src/java/org/apache/solr/ltr/model/NeuralNetworkModel.java
+++ b/solr/contrib/ltr/src/java/org/apache/solr/ltr/model/NeuralNetworkModel.java
@@ -31,7 +31,7 @@ import org.apache.solr.util.SolrPluginUtils;
  * A scoring model that computes document scores using a neural network.
  * <p>
  * Supported <a href="https://en.wikipedia.org/wiki/Activation_function">activation functions</a> are:
- * <code>identity</code>, <code>relu</code>, <code>sigmoid</code> and
+ * <code>identity</code>, <code>relu</code>, <code>sigmoid</code>, <code>tanh</code>, <code>leakyrelu</code> and
  * contributions to support additional activation functions are welcome.
  * <p>
  * Example configuration:
@@ -60,8 +60,20 @@ import org.apache.solr.util.SolrPluginUtils;
                 "activation" : "relu"
             },
             {
-                "matrix" : [ [ 27.0, 28.0 ] ],
-                "bias" : [ 29.0 ],
+                "matrix" : [ [ 27.0, 28.0 ],
+                             [ 29.0, 30.0 ] ],
+                "bias" : [ 31.0, 32.0 ],
+                "activation" : "leakyrelu"
+            },
+            {
+                "matrix" : [ [ 33.0, 34.0 ],
+                             [ 35.0, 36.0 ] ],
+                "bias" : [ 37.0, 38.0 ],
+                "activation" : "tanh"
+            },
+            {
+                "matrix" : [ [ 39.0, 40.0 ] ],
+                "bias" : [ 41.0 ],
                 "activation" : "identity"
             }
         ]
@@ -144,6 +156,22 @@ public class NeuralNetworkModel extends LTRScoringModel {
             }
           };
           break;
+        case "leakyrelu":
+          this.activation = new Activation() {
+            @Override
+            public float apply(float in) {
+              return in < 0 ? 0.01f * in : in;
+            }
+          };
+          break;
+        case "tanh":
+          this.activation = new Activation() {
+            @Override
+            public float apply(float in) {
+              return (float)Math.tanh(in);
+            }
+          };
+          break;
         case "sigmoid":
           this.activation = new Activation() {
             @Override

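For readers skimming the patch, here is a minimal, self-contained sketch of the math behind the two new cases. It is illustrative only: the class name, method names, and the sample input vector are invented for this note; the matrix and bias values are copied from the leakyrelu layer of the javadoc example above; and the layer semantics (matrix times input, plus bias, then the layer's activation) follow what that example suggests rather than quoting the class itself.

// ActivationSketch.java -- illustrative only, not part of the commit above.
public class ActivationSketch {

  // leakyrelu: negative inputs are scaled by 0.01 rather than clipped to zero (same constant as the patch)
  static float leakyRelu(float in) {
    return in < 0 ? 0.01f * in : in;
  }

  // tanh: hyperbolic tangent, squashes values into (-1, 1)
  static float tanh(float in) {
    return (float) Math.tanh(in);
  }

  // One feed-forward layer with leakyrelu: out[i] = leakyRelu(sum_j matrix[i][j] * in[j] + bias[i]).
  static float[] applyLayer(float[][] matrix, float[] bias, float[] in) {
    float[] out = new float[matrix.length];
    for (int i = 0; i < matrix.length; i++) {
      float sum = bias[i];
      for (int j = 0; j < in.length; j++) {
        sum += matrix[i][j] * in[j];
      }
      out[i] = leakyRelu(sum);
    }
    return out;
  }

  public static void main(String[] args) {
    System.out.println(leakyRelu(-2.0f)); // -0.02
    System.out.println(tanh(-2.0f));      // ~ -0.964
    // matrix/bias taken from the "leakyrelu" layer of the javadoc example; the input vector is made up
    float[] out = applyLayer(new float[][] {{27.0f, 28.0f}, {29.0f, 30.0f}},
                             new float[] {31.0f, 32.0f},
                             new float[] {1.0f, -1.0f});
    System.out.println(out[0] + " " + out[1]); // 30.0 31.0
  }
}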

[37/50] [abbrv] lucene-solr:jira/http2: Fix couple of typos.

Posted by da...@apache.org.
Fix couple of typos.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1ccd5558
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1ccd5558
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1ccd5558

Branch: refs/heads/jira/http2
Commit: 1ccd555862805d4205447510e3a60780d4f10985
Parents: d7fd82c
Author: Christine Poerschke <cp...@apache.org>
Authored: Mon Oct 15 15:08:17 2018 -0400
Committer: Christine Poerschke <cp...@apache.org>
Committed: Mon Oct 15 15:08:17 2018 -0400

----------------------------------------------------------------------
 .../src/java/org/apache/lucene/util/LuceneTestCase.java            | 2 +-
 .../src/java/org/apache/solr/logging/log4j2/Log4j2Watcher.java     | 2 +-
 solr/core/src/java/org/apache/solr/search/ReturnFields.java        | 2 +-
 .../apache/solr/update/processor/DistributedUpdateProcessor.java   | 2 +-
 solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java | 2 +-
 .../test/org/apache/solr/handler/component/StatsComponentTest.java | 2 +-
 .../org/apache/solr/search/TestRandomCollapseQParserPlugin.java    | 2 +-
 .../test/org/apache/solr/search/facet/TestJsonFacetRefinement.java | 2 +-
 .../src/working-with-external-files-and-processes.adoc             | 2 +-
 solr/solr-ref-guide/tools/BuildNavAndPDFBody.java                  | 2 +-
 .../java/org/apache/solr/client/solrj/io/stream/TopicStream.java   | 2 +-
 11 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index 95150fb..5d19587 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -894,7 +894,7 @@ public abstract class LuceneTestCase extends Assert {
    * Convenience method for logging an iterator.
    *
    * @param label  String logged before/after the items in the iterator
-   * @param iter   Each next() is toString()ed and logged on it's own line. If iter is null this is logged differnetly then an empty iterator.
+   * @param iter   Each next() is toString()ed and logged on its own line. If iter is null this is logged differently then an empty iterator.
    * @param stream Stream to log messages to.
    */
   public static void dumpIterator(String label, Iterator<?> iter,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/core/src/java/org/apache/solr/logging/log4j2/Log4j2Watcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/logging/log4j2/Log4j2Watcher.java b/solr/core/src/java/org/apache/solr/logging/log4j2/Log4j2Watcher.java
index 35aeceb..de79991 100644
--- a/solr/core/src/java/org/apache/solr/logging/log4j2/Log4j2Watcher.java
+++ b/solr/core/src/java/org/apache/solr/logging/log4j2/Log4j2Watcher.java
@@ -147,7 +147,7 @@ public class Log4j2Watcher extends LogWatcher<LogEvent> {
         }
       }
     } else {
-      //It doesn't have it's own logger yet so let's create one
+      //It doesn't have its own logger yet so let's create one
       LoggerConfig explicitConfig = new LoggerConfig(loggerName, Level.valueOf(level), true);
       explicitConfig.setParent(loggerConfig);
       config.addLogger(loggerName, explicitConfig);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/core/src/java/org/apache/solr/search/ReturnFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ReturnFields.java b/solr/core/src/java/org/apache/solr/search/ReturnFields.java
index dcd47a6..c2d5973 100644
--- a/solr/core/src/java/org/apache/solr/search/ReturnFields.java
+++ b/solr/core/src/java/org/apache/solr/search/ReturnFields.java
@@ -67,7 +67,7 @@ public abstract class ReturnFields {
 
   /** 
    * Returns <code>true</code> if the specified field should be returned <em>to the external client</em> 
-   * -- either using it's own name, or via an alias. 
+   * -- either using its own name, or via an alias. 
    * This method returns <code>false</code> even if the specified name is needed as an "extra" field
    * for use by transformers.
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index 56bcb7a..004f4f7 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -2064,7 +2064,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
   //    persist across sub-requests.
   //
   //   Note that the replica that receives the original request has the only RollupReplicationTracker that exists for the
-  //   lifetime of the batch. The leader for each shard keeps track of it's own achieved replicaiton for its shard
+  //   lifetime of the batch. The leader for each shard keeps track of its own achieved replication for its shard
   //   and attaches that to the response to the originating node (i.e. the one with the RollupReplicationTracker).
   //   Followers in general do not need a tracker of any sort with the sole exception of the RollupReplicationTracker
   //   allocated on the original node that receives the top-level request.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java
index 492765a..5a9db8f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java
@@ -379,7 +379,7 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
         params.add("ids", idsToRequest.get(0));
       } else {
         if (random().nextBoolean()) {
-          // each id in it's own param
+          // each id in its own param
           for (String id : idsToRequest) {
             params.add("id",id);
           }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java
index 6d8cfd4..d4bf069 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java
@@ -1779,7 +1779,7 @@ public class StatsComponentTest extends SolrTestCaseJ4 {
   }
 
   /**
-   * Test user input errors (split into it's own test to isolate ignored exceptions
+   * Test user input errors (split into its own test to isolate ignored exceptions)
    * @see #testCardinality 
    * @see #testHllOptions
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java b/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
index bbb2623..9149c19 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
@@ -163,7 +163,7 @@ public class TestRandomCollapseQParserPlugin extends SolrTestCaseJ4 {
             
             if (null == collapseVal) {
               if (NULL_EXPAND.equals(nullPolicy)) {
-                // nothing to check for this doc, it's in it's own group
+                // nothing to check for this doc, it's in its own group
                 continue;
               }
               

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
index 0a931bf..21924b1 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
@@ -828,7 +828,7 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
     //   - why aren't we just specifying all the buckets (and child buckets) chosen in phase#1 using "_p" ?
     //   - or at the very least, if the purpose of "_l" is to give other buckets a chance to "bubble up"
     //     in phase#2, then shouldn't a "_l" refinement requests still include the buckets choosen in
-    //     phase#1, and request that the shard fill them in in addition to returning it's own top buckets?
+    //     phase#1, and request that the shard fill them in in addition to returning its own top buckets?
     client.testJQ(params("q", "*:*", "rows", "0", "json.facet", "{"
                          + "processEmpty:true,"
                          + "parent:{ type:terms, field:parent_s, limit:2, overrequest:0, refine:true, facet:{"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc b/solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc
index 9b1771e..ae1bc55 100644
--- a/solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc
+++ b/solr/solr-ref-guide/src/working-with-external-files-and-processes.adoc
@@ -200,7 +200,7 @@ Token positions are tracked and implicitly added to the token stream - the start
 
 ==== Example Token Streams
 
-// TODO: in cwiki each of these examples was in it's own "panel" ... do we want something like that here?
+// TODO: in cwiki each of these examples was in its own "panel" ... do we want something like that here?
 // TODO: these examples match what was in cwiki, but I'm honestly not sure if the formatting there was correct to start?
 
 [source,text]

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/solr-ref-guide/tools/BuildNavAndPDFBody.java
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/tools/BuildNavAndPDFBody.java b/solr/solr-ref-guide/tools/BuildNavAndPDFBody.java
index 815c7a4..1657f8c 100644
--- a/solr/solr-ref-guide/tools/BuildNavAndPDFBody.java
+++ b/solr/solr-ref-guide/tools/BuildNavAndPDFBody.java
@@ -67,7 +67,7 @@ public class BuildNavAndPDFBody {
     if (null == mainPage) {
       throw new RuntimeException("no main-page found with shortname: " + mainPageShortname);
     }
-    // NOTE: mainPage claims to be it's own parent to prevent anyone decendent from introducing a loop
+    // NOTE: mainPage claims to be its own parent to prevent anyone decendent from introducing a loop
     mainPage.buildPageTreeRecursive(mainPage, allPages);
 
     { // validate that there are no orphan pages

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ccd5558/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
index 9af4cbf..b42ab77 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
@@ -283,7 +283,7 @@ public class TopicStream extends CloudSolrStream implements Expressible  {
     this.eofTuples = Collections.synchronizedMap(new HashMap());
 
     if(checkpoints.size() == 0 && streamContext.numWorkers > 1) {
-      //Each worker must maintain it's own checkpoints
+      //Each worker must maintain its own checkpoints
       this.id = this.id+"_"+streamContext.workerID;
     }
 


[49/50] [abbrv] lucene-solr:jira/http2: SOLR-12877: avoid NPE in TestUtilizeNode.getReplicaList

Posted by da...@apache.org.
SOLR-12877: avoid NPE in TestUtilizeNode.getReplicaList


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d2f7272b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d2f7272b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d2f7272b

Branch: refs/heads/jira/http2
Commit: d2f7272b0e2d4faefdf0c7c7e991311bde4e9c3e
Parents: 15002eb
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue Oct 16 12:54:00 2018 -0400
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue Oct 16 12:54:00 2018 -0400

----------------------------------------------------------------------
 .../src/test/org/apache/solr/cloud/TestUtilizeNode.java   | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2f7272b/solr/core/src/test/org/apache/solr/cloud/TestUtilizeNode.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestUtilizeNode.java b/solr/core/src/test/org/apache/solr/cloud/TestUtilizeNode.java
index bc64b6d..18ac662 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestUtilizeNode.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestUtilizeNode.java
@@ -170,11 +170,13 @@ public class TestUtilizeNode extends SolrCloudTestCase {
       .getClusterState().getCollectionOrNull(collectionName, false);
     
     List<Replica> results = new ArrayList<>(3);
-    collection.forEachReplica((s, replica) -> {
+    if (collection != null) {
+      collection.forEachReplica((s, replica) -> {
         if (replica.getNodeName().equals(jettyNode.getNodeName())) {
-        results.add(replica);
-      }
-    });
+          results.add(replica);
+        }
+      });
+    }
     return results;
   }
 

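For readers skimming this patch: getCollectionOrNull can legitimately return null (the collection may not exist, or may not yet be in the cached cluster state), so the iteration is now guarded instead of dereferencing a null DocCollection. A minimal standalone sketch of the same pattern follows; the helper name and surrounding setup are illustrative, not part of the patch.

import java.util.ArrayList;
import java.util.List;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;

// Hypothetical helper showing the same null guard as the fix above:
static List<Replica> replicasOnNode(ClusterState state, String collectionName, String nodeName) {
  List<Replica> results = new ArrayList<>();
  DocCollection collection = state.getCollectionOrNull(collectionName);
  if (collection != null) {                 // collection may be absent from the cluster state
    collection.forEachReplica((shard, replica) -> {
      if (replica.getNodeName().equals(nodeName)) {
        results.add(replica);
      }
    });
  }
  return results;
}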

[20/50] [abbrv] lucene-solr:jira/http2: SOLR-12739: Use cluster instead of collection as the key for using legacy assignment.

Posted by da...@apache.org.
SOLR-12739: Use cluster instead of collection as the key for using legacy assignment.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/940a7303
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/940a7303
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/940a7303

Branch: refs/heads/jira/http2
Commit: 940a7303ee3cdc668b743377f43664990068749f
Parents: 9f34a7c
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Oct 10 16:19:17 2018 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Oct 10 16:19:17 2018 +0530

----------------------------------------------------------------------
 .../solr/cloud/OverseerCollectionConfigSetProcessorTest.java      | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/940a7303/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index e999d73..5761359 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -77,6 +77,7 @@ import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.common.params.CollectionAdminParams.CLUSTER;
 import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
 import static org.apache.solr.common.params.CollectionAdminParams.DEFAULTS;
 import static org.apache.solr.common.params.CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT;
@@ -331,7 +332,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
     when(cloudDataProviderMock.getClusterStateProvider()).thenReturn(clusterStateProviderMock);
     when(clusterStateProviderMock.getClusterState()).thenReturn(clusterStateMock);
     when(clusterStateProviderMock.getLiveNodes()).thenReturn(liveNodes);
-    when(clusterStateProviderMock.getClusterProperties()).thenReturn(Utils.makeMap(DEFAULTS, Utils.makeMap(COLLECTION, Utils.makeMap(USE_LEGACY_REPLICA_ASSIGNMENT, true))));
+    when(clusterStateProviderMock.getClusterProperties()).thenReturn(Utils.makeMap(DEFAULTS, Utils.makeMap(CLUSTER, Utils.makeMap(USE_LEGACY_REPLICA_ASSIGNMENT, true))));
     when(cloudDataProviderMock.getDistribStateManager()).thenReturn(stateManagerMock);
     when(stateManagerMock.hasData(anyString())).thenAnswer(invocation -> zkMap.containsKey(invocation.getArgument(0)));
     when(stateManagerMock.getAutoScalingConfig()).thenReturn(autoScalingConfig);

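To make the one-line change easier to read: the legacy replica-assignment flag is now looked up under the "cluster" defaults rather than the "collection" defaults. The sketch below is illustrative only; the JSON shape and variable name are assumptions, while the makeMap call mirrors the mocked value above.

import java.util.Map;
import org.apache.solr.common.util.Utils;
import static org.apache.solr.common.params.CollectionAdminParams.*;

// Cluster properties shaped roughly like:
//   { "defaults" : { "cluster" : { "useLegacyReplicaAssignment" : true } } }
// which this test now mocks with the equivalent nested map:
Map<?, ?> clusterProps =
    Utils.makeMap(DEFAULTS, Utils.makeMap(CLUSTER, Utils.makeMap(USE_LEGACY_REPLICA_ASSIGNMENT, true)));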

[17/50] [abbrv] lucene-solr:jira/http2: SOLR-12843: precommit errors

Posted by da...@apache.org.
SOLR-12843: precommit errors


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3629e760
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3629e760
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3629e760

Branch: refs/heads/jira/http2
Commit: 3629e760113d8faa4b544bffafa3e8b33d2eb404
Parents: 8d205ec
Author: Noble Paul <no...@apache.org>
Authored: Wed Oct 10 15:38:20 2018 +1100
Committer: Noble Paul <no...@apache.org>
Committed: Wed Oct 10 15:38:20 2018 +1100

----------------------------------------------------------------------
 .../java/org/apache/solr/client/solrj/request/RequestWriter.java | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3629e760/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
index 5d941e0..96650c2 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
@@ -73,6 +73,10 @@ public class RequestWriter {
     return req.getContentWriter(ClientUtils.TEXT_XML);
   }
 
+  /**
+   * @deprecated Use {@link #getContentWriter(SolrRequest)}.
+   */
+  @Deprecated
   public Collection<ContentStream> getContentStreams(SolrRequest req) throws IOException {
     if (req instanceof UpdateRequest) {
       return null;

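For downstream code that still calls getContentStreams, a rough caller-side migration sketch, assuming the ContentWriter contract (write(OutputStream) / getContentType()) defined alongside this class; the request contents and variable names are made up for illustration.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.solr.client.solrj.request.RequestWriter;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

// Hypothetical migration off the deprecated getContentStreams(SolrRequest):
static void writeRequestBody() throws IOException {
  UpdateRequest req = new UpdateRequest();
  req.add(new SolrInputDocument("id", "1", "title_s", "example"));

  RequestWriter requestWriter = new RequestWriter();
  RequestWriter.ContentWriter writer = requestWriter.getContentWriter(req);   // preferred API
  if (writer != null) {                       // may be null, just like getContentStreams
    ByteArrayOutputStream body = new ByteArrayOutputStream();
    writer.write(body);                       // streams the serialized request
    String contentType = writer.getContentType();
    // hand body/contentType to the HTTP layer instead of iterating ContentStream objects
  }
}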

[10/50] [abbrv] lucene-solr:jira/http2: SOLR-11812: Remove backward compatibility of old LIR implementation in 8.0

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java b/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java
deleted file mode 100644
index 88be4db..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/LIRRollingUpdatesTest.java
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.Reader;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Supplier;
-
-import org.apache.solr.JSONTestUtil;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.util.TimeOut;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LIRRollingUpdatesTest extends SolrCloudTestCase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static Map<URI, SocketProxy> proxies;
-  private static Map<URI, JettySolrRunner> jettys;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(3)
-        .addConfig("conf", configset("cloud-minimal"))
-        .configure();
-    // Add proxies
-    proxies = new HashMap<>(cluster.getJettySolrRunners().size());
-    jettys = new HashMap<>(cluster.getJettySolrRunners().size());
-    for (JettySolrRunner jetty:cluster.getJettySolrRunners()) {
-      SocketProxy proxy = new SocketProxy();
-      jetty.setProxyPort(proxy.getListenPort());
-      cluster.stopJettySolrRunner(jetty);//TODO: Can we avoid this restart
-      cluster.startJettySolrRunner(jetty);
-      proxy.open(jetty.getBaseUrl().toURI());
-      log.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
-      proxies.put(proxy.getUrl(), proxy);
-      jettys.put(proxy.getUrl(), jetty);
-    }
-  }
-
-
-  @AfterClass
-  public static void tearDownCluster() throws Exception {
-    for (SocketProxy proxy:proxies.values()) {
-      proxy.close();
-    }
-    proxies = null;
-    jettys = null;
-  }
-
-  @Test
-  // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
-  // commented 15-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
-  public void testNewReplicaOldLeader() throws Exception {
-
-    String collection = "testNewReplicaOldLeader";
-    CollectionAdminRequest.createCollection(collection, 1, 2)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient());
-    Properties oldLir = new Properties();
-    oldLir.setProperty("lirVersion", "old");
-
-    CollectionAdminRequest
-        .addReplicaToShard(collection, "shard1")
-        .setProperties(oldLir)
-        .setNode(cluster.getJettySolrRunner(0).getNodeName())
-        .process(cluster.getSolrClient());
-
-    CollectionAdminRequest
-        .addReplicaToShard(collection, "shard1")
-        .setProperties(oldLir)
-        .setNode(cluster.getJettySolrRunner(1).getNodeName())
-        .process(cluster.getSolrClient());
-    waitForState("Time waiting for 1x2 collection", collection, clusterShape(1, 2));
-
-    addDocs(collection, 2, 0);
-
-    Slice shard1 = getCollectionState(collection).getSlice("shard1");
-    //introduce network partition between leader & replica
-    Replica notLeader = shard1.getReplicas(x -> x != shard1.getLeader()).get(0);
-    assertTrue(runInOldLIRMode(collection, "shard1", notLeader));
-    getProxyForReplica(notLeader).close();
-    getProxyForReplica(shard1.getLeader()).close();
-
-    addDoc(collection, 2, getJettyForReplica(shard1.getLeader()));
-    waitForState("Replica " + notLeader.getName() + " is not put as DOWN", collection,
-        (liveNodes, collectionState) ->
-            collectionState.getSlice("shard1").getReplica(notLeader.getName()).getState() == Replica.State.DOWN);
-    getProxyForReplica(shard1.getLeader()).reopen();
-    getProxyForReplica(notLeader).reopen();
-    // make sure that, when new replica works with old leader, it still can recovery normally
-    waitForState("Timeout waiting for recovering", collection, clusterShape(1, 2));
-    assertDocsExistInAllReplicas(Collections.singletonList(notLeader), collection, 0, 2);
-
-    // make sure that, when new replica restart during LIR, it still can recovery normally (by looking at LIR node)
-    getProxyForReplica(notLeader).close();
-    getProxyForReplica(shard1.getLeader()).close();
-
-    addDoc(collection, 3, getJettyForReplica(shard1.getLeader()));
-    waitForState("Replica " + notLeader.getName() + " is not put as DOWN", collection,
-        (liveNodes, collectionState) ->
-            collectionState.getSlice("shard1").getReplica(notLeader.getName()).getState() == Replica.State.DOWN);
-
-    JettySolrRunner notLeaderJetty = getJettyForReplica(notLeader);
-    notLeaderJetty.stop();
-    waitForState("Node did not leave", collection, (liveNodes, collectionState) -> liveNodes.size() == 2);
-    upgrade(notLeaderJetty);
-    notLeaderJetty.start();
-
-    getProxyForReplica(shard1.getLeader()).reopen();
-    getProxyForReplica(notLeader).reopen();
-    waitForState("Timeout waiting for recovering", collection, clusterShape(1, 2));
-    assertFalse(runInOldLIRMode(collection, "shard1", notLeader));
-    assertDocsExistInAllReplicas(Collections.singletonList(notLeader), collection, 0, 3);
-
-    CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
-  }
-
-  @Test
-  // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 04-May-2018
-  // commented 15-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
-  public void testNewLeaderOldReplica() throws Exception {
-    // in case of new leader & old replica, new leader can still put old replica into LIR
-
-    String collection = "testNewLeaderOldReplica";
-    CollectionAdminRequest.createCollection(collection, 1, 2)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient());
-    Properties oldLir = new Properties();
-    oldLir.setProperty("lirVersion", "old");
-
-    CollectionAdminRequest
-        .addReplicaToShard(collection, "shard1")
-        .setNode(cluster.getJettySolrRunner(0).getNodeName())
-        .process(cluster.getSolrClient());
-    waitForState("Timeout waiting for shard1 become active", collection, (liveNodes, collectionState) -> {
-      Slice shard1 = collectionState.getSlice("shard1");
-      if (shard1.getReplicas().size() == 1 && shard1.getLeader() != null) return true;
-      return false;
-    });
-
-    CollectionAdminRequest
-        .addReplicaToShard(collection, "shard1")
-        .setProperties(oldLir)
-        .setNode(cluster.getJettySolrRunner(1).getNodeName())
-        .process(cluster.getSolrClient());
-    waitForState("Time waiting for 1x2 collection", collection, clusterShape(1, 2));
-
-    Slice shard1 = getCollectionState(collection).getSlice("shard1");
-    Replica notLeader = shard1.getReplicas(x -> x != shard1.getLeader()).get(0);
-    Replica leader = shard1.getLeader();
-
-    assertTrue(runInOldLIRMode(collection, "shard1", notLeader));
-    assertFalse(runInOldLIRMode(collection, "shard1", leader));
-
-    addDocs(collection, 2, 0);
-    getProxyForReplica(notLeader).close();
-    getProxyForReplica(leader).close();
-
-    JettySolrRunner leaderJetty = getJettyForReplica(leader);
-    addDoc(collection, 2, leaderJetty);
-    waitForState("Replica " + notLeader.getName() + " is not put as DOWN", collection,
-        (liveNodes, collectionState) ->
-            collectionState.getSlice("shard1").getReplica(notLeader.getName()).getState() == Replica.State.DOWN);
-    // wait a little bit
-    Thread.sleep(500);
-    getProxyForReplica(notLeader).reopen();
-    getProxyForReplica(leader).reopen();
-
-    waitForState("Timeout waiting for recovering", collection, clusterShape(1, 2));
-    assertDocsExistInAllReplicas(Collections.singletonList(notLeader), collection, 0, 2);
-
-    // ensure that after recovery, the upgraded replica will clean its LIR status cause it is no longer needed
-    assertFalse(cluster.getSolrClient().getZkStateReader().getZkClient().exists(
-        ZkController.getLeaderInitiatedRecoveryZnodePath(collection, "shard1", notLeader.getName()), true));
-    // ensure that, leader should not register other replica's term
-    try (ZkShardTerms zkShardTerms = new ZkShardTerms(collection, "shard1", cluster.getZkClient())) {
-      assertFalse(zkShardTerms.getTerms().containsKey(notLeader.getName()));
-    }
-    CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
-  }
-
-  public void testLeaderAndMixedReplicas(boolean leaderInOldMode) throws Exception {
-    // in case of new leader and mixed old replica and new replica, new leader can still put all of them into recovery
-    // step1 : setup collection
-    String collection = "testMixedReplicas-"+leaderInOldMode;
-    CollectionAdminRequest.createCollection(collection, 1, 2)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient());
-    Properties oldLir = new Properties();
-    oldLir.setProperty("lirVersion", "old");
-
-    if (leaderInOldMode) {
-      CollectionAdminRequest
-          .addReplicaToShard(collection, "shard1")
-          .setProperties(oldLir)
-          .setNode(cluster.getJettySolrRunner(0).getNodeName())
-          .process(cluster.getSolrClient());
-    } else {
-      CollectionAdminRequest
-          .addReplicaToShard(collection, "shard1")
-          .setNode(cluster.getJettySolrRunner(0).getNodeName())
-          .process(cluster.getSolrClient());
-    }
-
-    waitForState("Timeout waiting for shard1 become active", collection, clusterShape(1, 1));
-
-    CollectionAdminRequest
-        .addReplicaToShard(collection, "shard1")
-        .setProperties(oldLir)
-        .setNode(cluster.getJettySolrRunner(1).getNodeName())
-        .process(cluster.getSolrClient());
-
-    CollectionAdminRequest
-        .addReplicaToShard(collection, "shard1")
-        .setNode(cluster.getJettySolrRunner(2).getNodeName())
-        .process(cluster.getSolrClient());
-    waitForState("Timeout waiting for shard1 become active", collection, clusterShape(1, 3));
-
-    Slice shard1 = getCollectionState(collection).getSlice("shard1");
-    Replica replicaInOldMode = shard1.getReplicas(x -> x != shard1.getLeader()).get(0);
-    Replica replicaInNewMode = shard1.getReplicas(x -> x != shard1.getLeader()).get(1);
-    Replica leader = shard1.getLeader();
-
-    assertEquals(leaderInOldMode, runInOldLIRMode(collection, "shard1", leader));
-    if (!runInOldLIRMode(collection, "shard1", replicaInOldMode)) {
-      Replica temp = replicaInOldMode;
-      replicaInOldMode = replicaInNewMode;
-      replicaInNewMode = temp;
-    }
-    assertTrue(runInOldLIRMode(collection, "shard1", replicaInOldMode));
-    assertFalse(runInOldLIRMode(collection, "shard1", replicaInNewMode));
-
-    addDocs(collection, 2, 0);
-
-    // step2 : introduce network partition then add doc, replicas should be put into recovery
-    getProxyForReplica(replicaInOldMode).close();
-    getProxyForReplica(replicaInNewMode).close();
-    getProxyForReplica(leader).close();
-
-    JettySolrRunner leaderJetty = getJettyForReplica(leader);
-    addDoc(collection, 2, leaderJetty);
-
-    Replica finalReplicaInOldMode = replicaInOldMode;
-    waitForState("Replica " + replicaInOldMode.getName() + " is not put as DOWN", collection,
-        (liveNodes, collectionState) ->
-            collectionState.getSlice("shard1").getReplica(finalReplicaInOldMode.getName()).getState() == Replica.State.DOWN);
-    Replica finalReplicaInNewMode = replicaInNewMode;
-    waitForState("Replica " + finalReplicaInNewMode.getName() + " is not put as DOWN", collection,
-        (liveNodes, collectionState) ->
-            collectionState.getSlice("shard1").getReplica(finalReplicaInNewMode.getName()).getState() == Replica.State.DOWN);
-
-    // wait a little bit
-    Thread.sleep(500);
-    getProxyForReplica(replicaInOldMode).reopen();
-    getProxyForReplica(replicaInNewMode).reopen();
-    getProxyForReplica(leader).reopen();
-
-    waitForState("Timeout waiting for recovering", collection, clusterShape(1, 3));
-    assertDocsExistInAllReplicas(Arrays.asList(replicaInNewMode, replicaInOldMode), collection, 0, 2);
-
-    addDocs(collection, 3, 3);
-
-    // ensure that, leader should not register other replica's term
-    try (ZkShardTerms zkShardTerms = new ZkShardTerms(collection, "shard1", cluster.getZkClient())) {
-      assertFalse(zkShardTerms.getTerms().containsKey(replicaInOldMode.getName()));
-    }
-
-    // step3 : upgrade the replica running in old mode to the new mode
-    getProxyForReplica(leader).close();
-    getProxyForReplica(replicaInOldMode).close();
-    addDoc(collection, 6, leaderJetty);
-    JettySolrRunner oldJetty = getJettyForReplica(replicaInOldMode);
-    oldJetty.stop();
-    waitForState("Node did not leave", collection, (liveNodes, collectionState)
-        -> liveNodes.size() == 2);
-    upgrade(oldJetty);
-
-    oldJetty.start();
-    getProxyForReplica(leader).reopen();
-    getProxyForReplica(replicaInOldMode).reopen();
-
-    waitForState("Timeout waiting for recovering", collection, clusterShape(1, 3));
-    assertDocsExistInAllReplicas(Arrays.asList(replicaInNewMode, replicaInOldMode), collection, 0, 6);
-
-    CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
-  }
-
-  @Test
-  // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 04-May-2018
-  // commented 15-Sep-2018  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
-  public void testNewLeaderAndMixedReplicas() throws Exception {
-    testLeaderAndMixedReplicas(false);
-  }
-
-  @Test
-  // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 04-May-2018
-  public void testOldLeaderAndMixedReplicas() throws Exception {
-    testLeaderAndMixedReplicas(true);
-  }
-
-  private void upgrade(JettySolrRunner solrRunner) {
-    File[] corePaths = new File(solrRunner.getSolrHome()).listFiles();
-    for (File corePath : corePaths) {
-      File coreProperties = new File(corePath, "core.properties");
-      if (!coreProperties.exists()) continue;
-      Properties properties = new Properties();
-
-      try (Reader reader = new InputStreamReader(new FileInputStream(coreProperties), "UTF-8")) {
-        properties.load(reader);
-      } catch (Exception e) {
-        continue;
-      }
-      properties.remove("lirVersion");
-      try (Writer writer = new OutputStreamWriter(new FileOutputStream(coreProperties), "UTF-8")) {
-        properties.store(writer, "Upgraded");
-      } catch (Exception e) {
-        continue;
-      }
-    }
-  }
-
-  protected void assertDocsExistInAllReplicas(List<Replica> notLeaders,
-                                              String testCollectionName, int firstDocId, int lastDocId)
-      throws Exception {
-    Replica leader =
-        cluster.getSolrClient().getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
-    HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
-    List<HttpSolrClient> replicas =
-        new ArrayList<HttpSolrClient>(notLeaders.size());
-
-    for (Replica r : notLeaders) {
-      replicas.add(getHttpSolrClient(r, testCollectionName));
-    }
-    try {
-      for (int d = firstDocId; d <= lastDocId; d++) {
-        String docId = String.valueOf(d);
-        assertDocExists(leaderSolr, testCollectionName, docId);
-        for (HttpSolrClient replicaSolr : replicas) {
-          assertDocExists(replicaSolr, testCollectionName, docId);
-        }
-      }
-    } finally {
-      if (leaderSolr != null) {
-        leaderSolr.close();
-      }
-      for (HttpSolrClient replicaSolr : replicas) {
-        replicaSolr.close();
-      }
-    }
-  }
-
-  protected void assertDocExists(HttpSolrClient solr, String coll, String docId) throws Exception {
-    NamedList rsp = realTimeGetDocId(solr, docId);
-    String match = JSONTestUtil.matchObj("/id", rsp.get("doc"), docId);
-    assertTrue("Doc with id=" + docId + " not found in " + solr.getBaseURL()
-        + " due to: " + match + "; rsp="+rsp, match == null);
-  }
-
-  private NamedList realTimeGetDocId(HttpSolrClient solr, String docId) throws SolrServerException, IOException {
-    QueryRequest qr = new QueryRequest(params("qt", "/get", "id", docId, "distrib", "false"));
-    return solr.request(qr);
-  }
-
-  protected HttpSolrClient getHttpSolrClient(Replica replica, String coll) throws Exception {
-    ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
-    String url = zkProps.getBaseUrl() + "/" + coll;
-    return getHttpSolrClient(url);
-  }
-
-  private <T> void waitFor(int waitTimeInSecs, T expected, Supplier<T> supplier) throws InterruptedException {
-    TimeOut timeOut = new TimeOut(waitTimeInSecs, TimeUnit.SECONDS, new TimeSource.CurrentTimeSource());
-    while (!timeOut.hasTimedOut()) {
-      if (expected == supplier.get()) return;
-      Thread.sleep(100);
-    }
-    assertEquals(expected, supplier.get());
-  }
-
-  private boolean runInOldLIRMode(String collection, String shard, Replica replica) {
-    try (ZkShardTerms shardTerms = new ZkShardTerms(collection, shard, cluster.getZkClient())) {
-      return !shardTerms.registered(replica.getName());
-    }
-  }
-
-  private void addDoc(String collection, int docId, JettySolrRunner solrRunner) throws IOException, SolrServerException {
-    try (HttpSolrClient solrClient = new HttpSolrClient.Builder(solrRunner.getBaseUrl().toString()).build()) {
-      solrClient.add(collection, new SolrInputDocument("id", String.valueOf(docId), "fieldName_s", String.valueOf(docId)));
-    }
-  }
-
-  private void addDocs(String collection, int numDocs, int startId) throws SolrServerException, IOException {
-    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
-    for (int i = 0; i < numDocs; i++) {
-      int id = startId + i;
-      docs.add(new SolrInputDocument("id", String.valueOf(id), "fieldName_s", String.valueOf(id)));
-    }
-    cluster.getSolrClient().add(collection, docs);
-    cluster.getSolrClient().commit(collection);
-  }
-
-
-  protected JettySolrRunner getJettyForReplica(Replica replica) throws Exception {
-    String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
-    assertNotNull(replicaBaseUrl);
-    URL baseUrl = new URL(replicaBaseUrl);
-
-    JettySolrRunner proxy = jettys.get(baseUrl.toURI());
-    assertNotNull("No proxy found for " + baseUrl + "!", proxy);
-    return proxy;
-  }
-
-  protected SocketProxy getProxyForReplica(Replica replica) throws Exception {
-    String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
-    assertNotNull(replicaBaseUrl);
-    URL baseUrl = new URL(replicaBaseUrl);
-
-    SocketProxy proxy = proxies.get(baseUrl.toURI());
-    if (proxy == null && !baseUrl.toExternalForm().endsWith("/")) {
-      baseUrl = new URL(baseUrl.toExternalForm() + "/");
-      proxy = proxies.get(baseUrl.toURI());
-    }
-    assertNotNull("No proxy found for " + baseUrl + "!", proxy);
-    return proxy;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java b/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java
deleted file mode 100644
index ce9d9ad..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.util.MockCoreContainer.MockCoreDescriptor;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.junit.Test;
-
-/**
- * Test for {@link LeaderInitiatedRecoveryThread}
- */
-@Deprecated
-@SolrTestCaseJ4.SuppressSSL
-public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTestBase {
-
-  public TestLeaderInitiatedRecoveryThread() {
-    sliceCount = 1;
-    fixShardCount(2);
-  }
-
-  @Test
-  //17-Aug-2018 commented @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 09-Apr-2018
-  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
-  public void testPublishDownState() throws Exception {
-    waitForRecoveriesToFinish(true);
-
-    final String leaderCoreNodeName = shardToLeaderJetty.get(SHARD1).coreNodeName;
-    final CloudJettyRunner leaderRunner = shardToLeaderJetty.get(SHARD1);
-    final CoreContainer coreContainer1 = leaderRunner.jetty.getCoreContainer();
-    final ZkController zkController1 = coreContainer1.getZkController();
-
-    CloudJettyRunner notLeader = null;
-    for (CloudJettyRunner cloudJettyRunner : shardToJetty.get(SHARD1)) {
-      if (cloudJettyRunner != leaderRunner) {
-        notLeader = cloudJettyRunner;
-        break;
-      }
-    }
-    assertNotNull(notLeader);
-    Replica replica = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION).getReplica(notLeader.coreNodeName);
-    ZkCoreNodeProps replicaCoreNodeProps = new ZkCoreNodeProps(replica);
-    
-    MockCoreDescriptor cd = new MockCoreDescriptor() {
-      public CloudDescriptor getCloudDescriptor() {
-        return new CloudDescriptor(shardToLeaderJetty.get(SHARD1).info.getStr(ZkStateReader.CORE_NAME_PROP), new Properties(), this) {
-          @Override
-          public String getCoreNodeName() {
-            return shardToLeaderJetty.get(SHARD1).info.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
-          }
-          @Override
-          public boolean isLeader() {
-            return true;
-          }
-        };
-      }
-    };
-
-    /*
-     1. Test that publishDownState throws exception when zkController.isReplicaInRecoveryHandling == false
-      */
-
-    SolrException e = expectThrows(SolrException.class,
-        "publishDownState should not have succeeded because replica url is not marked in leader initiated recovery in ZkController",
-        () -> {
-      LeaderInitiatedRecoveryThread thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
-          DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd);
-      assertFalse(zkController1.isReplicaInRecoveryHandling(replicaCoreNodeProps.getCoreUrl()));
-      thread.run();
-    });
-    assertEquals(e.code(), SolrException.ErrorCode.INVALID_STATE.code);
-
-    /*
-     2. Test that a non-live replica cannot be put into LIR or down state
-      */
-    LeaderInitiatedRecoveryThread thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
-        DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd);
-    // kill the replica
-    int children = cloudClient.getZkStateReader().getZkClient().getChildren("/live_nodes", null, true).size();
-    ChaosMonkey.stop(notLeader.jetty);
-    TimeOut timeOut = new TimeOut(60, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (!timeOut.hasTimedOut()) {
-      if (children > cloudClient.getZkStateReader().getZkClient().getChildren("/live_nodes", null, true).size()) {
-        break;
-      }
-      Thread.sleep(500);
-    }
-    assertTrue(children > cloudClient.getZkStateReader().getZkClient().getChildren("/live_nodes", null, true).size());
-
-    int cversion = getOverseerCversion();
-    // Thread should not publish LIR and down state for node which is not live, regardless of whether forcePublish is true or false
-    assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false));
-    // lets assert that we did not publish anything to overseer queue, simplest way is to assert that cversion of overseer queue zk node is still the same
-    assertEquals(cversion, getOverseerCversion());
-
-    assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), true));
-    // lets assert that we did not publish anything to overseer queue
-    assertEquals(cversion, getOverseerCversion());
-
-
-    /*
-    3. Test that if ZK connection loss then thread should not attempt to publish down state even if forcePublish=true
-     */
-    ChaosMonkey.start(notLeader.jetty);
-    waitForRecoveriesToFinish(true);
-
-    thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
-        DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd) {
-      @Override
-      protected void updateLIRState(String replicaCoreNodeName) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "", new KeeperException.ConnectionLossException());
-      }
-    };
-    assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false));
-    assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), true));
-    assertNull(zkController1.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
-
-
-    /*
-     4. Test that if ZK connection loss or session expired then thread should not attempt to publish down state even if forcePublish=true
-      */
-    thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
-        DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd) {
-      @Override
-      protected void updateLIRState(String replicaCoreNodeName) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "", new KeeperException.SessionExpiredException());
-      }
-    };
-    assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false));
-    assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), true));
-    assertNull(zkController1.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
-
-
-    /*
-     5. Test that any exception other then ZK connection loss or session expired should publish down state only if forcePublish=true
-      */
-    thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
-        DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd) {
-      @Override
-      protected void updateLIRState(String replicaCoreNodeName) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "bogus exception");
-      }
-    };
-    // the following should return true because regardless of the bogus exception in setting LIR state, we still want recovery commands to be sent,
-    // however the following will not publish a down state
-    cversion = getOverseerCversion();
-    assertTrue(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false));
-
-    // lets assert that we did not publish anything to overseer queue, simplest way is to assert that cversion of overseer queue zk node is still the same
-    assertEquals(cversion, getOverseerCversion());
-
-    assertTrue(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), true));
-    // this should have published a down state so assert that cversion has incremented
-    assertTrue(getOverseerCversion() > cversion);
-
-    timeOut = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (!timeOut.hasTimedOut()) {
-      Replica r = cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION).getReplica(replica.getName());
-      if (r.getState() == Replica.State.DOWN) {
-        break;
-      }
-      Thread.sleep(500);
-    }
-
-    assertNull(zkController1.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
-    assertEquals(Replica.State.DOWN, cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION).getReplica(replica.getName()).getState());
-
-    /*
-    6. Test that non-leader cannot set LIR nodes
-     */
-
-    final CoreContainer coreContainer2 = notLeader.jetty.getCoreContainer();
-    final ZkController zkController2 = coreContainer2.getZkController();
-
-    thread = new LeaderInitiatedRecoveryThread(zkController2, coreContainer2,
-        DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, coreContainer2.getCores().iterator().next().getCoreDescriptor()) {
-      @Override
-      protected void updateLIRState(String replicaCoreNodeName) {
-        throw expectThrows(ZkController.NotLeaderException.class, () -> super.updateLIRState(replicaCoreNodeName));
-      }
-    };
-    cversion = getOverseerCversion();
-    assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false));
-    assertEquals(cversion, getOverseerCversion());
-
-    /*
-     7. assert that we can write a LIR state if everything else is fine
-      */
-    // reset the zkcontroller to the one from the leader
-    final CoreContainer coreContainer3 = leaderRunner.jetty.getCoreContainer();
-    final ZkController zkController3 = coreContainer3.getZkController();
-    thread = new LeaderInitiatedRecoveryThread(zkController3, coreContainer3,
-        DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, coreContainer3.getCores().iterator().next().getCoreDescriptor());
-    thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false);
-    timeOut = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (!timeOut.hasTimedOut()) {
-      Replica.State state = zkController3.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName());
-      if (state == Replica.State.DOWN) {
-        break;
-      }
-      Thread.sleep(500);
-    }
-    assertNotNull(zkController3.getLeaderInitiatedRecoveryStateObject(DEFAULT_COLLECTION, SHARD1, replica.getName()));
-    assertEquals(Replica.State.DOWN, zkController3.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
-
-    /*
-    7. Test that
-     */
-  }
-
-  protected int getOverseerCversion() throws KeeperException, InterruptedException {
-    Stat stat = new Stat();
-    cloudClient.getZkStateReader().getZkClient().getData("/overseer/queue", null, stat, true);
-    return stat.getCversion();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37a2139/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
index 1c1b1d2..1745ae4f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java
@@ -84,7 +84,7 @@ public class ZkShardTermsTest extends SolrCloudTestCase {
       zkShardTerms.registerTerm("replica1");
       zkShardTerms.registerTerm("replica2");
 
-      // normal case when leader start lir process
+      // normal case when leader failed to send an update to replica
       zkShardTerms.ensureTermsIsHigher("replica1", Collections.singleton("replica2"));
       zkShardTerms.startRecovering("replica2");
       assertEquals(zkShardTerms.getTerm("replica2"), 1);
@@ -95,7 +95,6 @@ public class ZkShardTermsTest extends SolrCloudTestCase {
       assertEquals(zkShardTerms.getTerm("replica2"), 1);
       assertEquals(zkShardTerms.getTerm("replica2_recovering"), -1);
 
-      // stack of lir processes
       zkShardTerms.ensureTermsIsHigher("replica1", Collections.singleton("replica2"));
       assertEquals(zkShardTerms.getTerm("replica1"), 2);
       assertEquals(zkShardTerms.getTerm("replica2"), 1);

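Background for the reworded comments: with LIR removed, the leader signals a missed update purely through shard terms. Below is a minimal sketch of that flow; the SolrZkClient argument and wrapper method are illustrative, while the ZkShardTerms calls mirror the ones this test exercises.

import java.util.Collections;
import org.apache.solr.cloud.ZkShardTerms;
import org.apache.solr.common.cloud.SolrZkClient;

// Minimal sketch of the shard-terms flow that replaces leader-initiated recovery:
static void termsFlow(SolrZkClient zkClient) {
  try (ZkShardTerms terms = new ZkShardTerms("collection1", "shard1", zkClient)) {
    terms.registerTerm("replica1");     // both replicas join at the same term
    terms.registerTerm("replica2");

    // The leader (replica1) failed to deliver an update to replica2,
    // so it raises its own term; replica2 is now behind and must recover.
    terms.ensureTermsIsHigher("replica1", Collections.singleton("replica2"));

    terms.startRecovering("replica2");  // replica2 is marked recovering and catches up to the leader's term
  }
}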

[50/50] [abbrv] lucene-solr:jira/http2: merge with master

Posted by da...@apache.org.
merge with master


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9a36e87f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9a36e87f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9a36e87f

Branch: refs/heads/jira/http2
Commit: 9a36e87f85a3de78ff4fb473673005f8cca3613a
Parents: 6558203 d2f7272
Author: Cao Manh Dat <da...@apache.org>
Authored: Wed Oct 17 05:53:02 2018 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Wed Oct 17 05:53:02 2018 +0700

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  16 +
 .../lucene/analysis/cjk/CJKBigramFilter.java    |   8 +
 .../ja/JapaneseNumberFilterFactory.java         |   2 +-
 .../codecs/simpletext/SimpleTextBKDReader.java  |  59 +-
 .../codecs/simpletext/SimpleTextBKDWriter.java  | 144 ++--
 .../simpletext/SimpleTextFieldInfosFormat.java  |  21 +-
 .../simpletext/SimpleTextPointsReader.java      |  20 +-
 .../simpletext/SimpleTextPointsWriter.java      |   6 +-
 .../org/apache/lucene/codecs/PointsWriter.java  |  13 +-
 .../lucene50/Lucene50FieldInfosFormat.java      |   2 +-
 .../lucene60/Lucene60FieldInfosFormat.java      |  21 +-
 .../codecs/lucene60/Lucene60PointsReader.java   |   2 +-
 .../codecs/lucene60/Lucene60PointsWriter.java   |  14 +-
 .../codecs/perfield/PerFieldMergeState.java     |   2 +-
 .../org/apache/lucene/document/BinaryPoint.java |   4 +-
 .../org/apache/lucene/document/DoublePoint.java |  10 +-
 .../org/apache/lucene/document/DoubleRange.java |  10 +-
 .../org/apache/lucene/document/FieldType.java   |  66 +-
 .../org/apache/lucene/document/FloatPoint.java  |  10 +-
 .../org/apache/lucene/document/FloatRange.java  |  10 +-
 .../org/apache/lucene/document/IntPoint.java    |  10 +-
 .../org/apache/lucene/document/IntRange.java    |  10 +-
 .../org/apache/lucene/document/LatLonPoint.java |   6 +-
 .../org/apache/lucene/document/LongPoint.java   |  10 +-
 .../org/apache/lucene/document/LongRange.java   |  10 +-
 .../apache/lucene/document/RangeFieldQuery.java |   4 +-
 .../org/apache/lucene/index/CheckIndex.java     |  37 +-
 .../org/apache/lucene/index/CodecReader.java    |   2 +-
 .../lucene/index/DefaultIndexingChain.java      |  15 +-
 .../java/org/apache/lucene/index/FieldInfo.java |  73 +-
 .../org/apache/lucene/index/FieldInfos.java     |  61 +-
 .../org/apache/lucene/index/IndexWriter.java    |   6 +-
 .../apache/lucene/index/IndexableFieldType.java |   9 +-
 .../org/apache/lucene/index/PointValues.java    |  11 +-
 .../apache/lucene/index/PointValuesWriter.java  |  20 +-
 .../apache/lucene/index/SortingLeafReader.java  |   9 +-
 .../apache/lucene/search/PointInSetQuery.java   |   4 +-
 .../apache/lucene/search/PointRangeQuery.java   |   4 +-
 .../org/apache/lucene/util/bkd/BKDReader.java   | 140 ++--
 .../org/apache/lucene/util/bkd/BKDWriter.java   | 174 +++--
 .../apache/lucene/document/TestFieldType.java   |  11 +-
 .../lucene/index/TestIndexWriterOnVMError.java  |   1 +
 .../apache/lucene/index/TestIndexableField.java |   7 +-
 .../lucene/index/TestPendingSoftDeletes.java    |  10 +-
 .../apache/lucene/index/TestPointValues.java    |  14 +-
 .../apache/lucene/search/TestPointQueries.java  |   2 +-
 .../apache/lucene/util/TestDocIdSetBuilder.java |   7 +-
 .../apache/lucene/util/bkd/Test2BBKDPoints.java |   4 +-
 .../org/apache/lucene/util/bkd/TestBKD.java     | 142 ++--
 .../util/bkd/TestMutablePointsReaderUtils.java  |   7 +-
 .../org/apache/lucene/util/fst/TestFSTs.java    |   2 +-
 .../search/highlight/TermVectorLeafReader.java  |   2 +-
 .../join/PointInSetIncludingScoreQuery.java     |   4 +-
 .../apache/lucene/index/memory/MemoryIndex.java |  24 +-
 .../apache/lucene/document/BigIntegerPoint.java |  10 +-
 .../apache/lucene/document/HalfFloatPoint.java  |  10 +-
 .../lucene/spatial/bbox/BBoxStrategy.java       |   2 +-
 .../spatial/spatial4j/Geo3dShapeFactory.java    |   3 +
 .../spatial/vector/PointVectorStrategy.java     |   2 +-
 .../lucene/spatial/spatial4j/Geo3dRptTest.java  |  41 -
 .../lucene/spatial/spatial4j/Geo3dTest.java     |  85 ++
 .../codecs/asserting/AssertingPointsFormat.java |   4 +-
 .../codecs/cranky/CrankyPointsFormat.java       |  12 +-
 .../lucene/index/AssertingLeafReader.java       |  41 +-
 .../index/BaseIndexFileFormatTestCase.java      |   2 +-
 .../lucene/index/BasePointsFormatTestCase.java  |  76 +-
 .../lucene/index/MismatchedLeafReader.java      |   3 +-
 .../org/apache/lucene/index/RandomCodec.java    |   9 +-
 .../lucene/index/RandomPostingsTester.java      |   4 +-
 .../org/apache/lucene/util/LuceneTestCase.java  |  18 +-
 .../java/org/apache/lucene/util/TestUtil.java   |   2 +-
 solr/CHANGES.txt                                |  32 +-
 .../solr/ltr/model/NeuralNetworkModel.java      |  34 +-
 .../org/apache/solr/ltr/TestLTROnSolrCloud.java |   1 +
 .../java/org/apache/solr/cloud/CloudUtil.java   |  37 -
 .../org/apache/solr/cloud/ElectionContext.java  | 118 ---
 .../cloud/LeaderInitiatedRecoveryThread.java    | 366 ---------
 .../org/apache/solr/cloud/ZkController.java     | 356 +--------
 .../cloud/api/collections/AddReplicaCmd.java    |  19 +-
 .../solr/cloud/api/collections/Assign.java      |  70 +-
 .../api/collections/CreateCollectionCmd.java    |  19 +-
 .../api/collections/DeleteCollectionCmd.java    |   2 +
 .../cloud/api/collections/SplitShardCmd.java    |  37 +-
 .../autoscaling/InactiveShardPlanAction.java    |  48 +-
 .../solr/cloud/overseer/ReplicaMutator.java     |   9 +
 .../solr/cloud/overseer/SliceMutator.java       |  14 +-
 .../solr/handler/UpdateRequestHandler.java      |   2 +-
 .../solr/handler/admin/CollectionsHandler.java  |   9 -
 .../solr/handler/component/ExpandComponent.java |   3 +-
 .../solr/handler/loader/JavabinLoader.java      |  58 ++
 .../org/apache/solr/legacy/BBoxStrategy.java    |   2 +-
 .../apache/solr/legacy/PointVectorStrategy.java |   2 +-
 .../solr/logging/log4j2/Log4j2Watcher.java      |   2 +-
 .../org/apache/solr/schema/SchemaField.java     |   7 +-
 .../solr/search/CollapsingQParserPlugin.java    |   2 +-
 .../java/org/apache/solr/search/Insanity.java   |   2 +-
 .../org/apache/solr/search/ReturnFields.java    |   2 +-
 .../apache/solr/uninverting/FieldCacheImpl.java |   6 +-
 .../solr/uninverting/UninvertingReader.java     |   6 +-
 .../processor/DistributedUpdateProcessor.java   |  30 +-
 .../org/apache/solr/util/TestInjection.java     |  17 +
 .../solr/cloud/BasicDistributedZkTest.java      |   1 +
 .../solr/cloud/CollectionsAPISolrJTest.java     |  11 +
 .../apache/solr/cloud/DeleteReplicaTest.java    |  16 +-
 .../org/apache/solr/cloud/ForceLeaderTest.java  | 190 -----
 .../apache/solr/cloud/HttpPartitionTest.java    |  62 --
 .../solr/cloud/LIRRollingUpdatesTest.java       | 473 ------------
 .../solr/cloud/LegacyCloudClusterPropTest.java  |   2 +-
 .../apache/solr/cloud/MoveReplicaHDFSTest.java  |   3 +-
 ...verseerCollectionConfigSetProcessorTest.java |   7 +
 .../apache/solr/cloud/OverseerRolesTest.java    |   2 +-
 .../solr/cloud/RestartWhileUpdatingTest.java    |   2 +-
 .../apache/solr/cloud/TestCloudRecovery.java    |   1 +
 .../cloud/TestDeleteCollectionOnDownNodes.java  |   2 +
 .../TestLeaderInitiatedRecoveryThread.java      | 242 ------
 .../apache/solr/cloud/TestRandomFlRTGCloud.java |   2 +-
 .../org/apache/solr/cloud/TestTlogReplica.java  |   1 +
 .../org/apache/solr/cloud/TestUtilizeNode.java  |  10 +-
 .../apache/solr/cloud/TestWithCollection.java   |   4 +-
 .../org/apache/solr/cloud/ZkShardTermsTest.java |   3 +-
 .../solr/cloud/api/collections/AssignTest.java  |  39 +-
 .../CollectionsAPIAsyncDistributedZkTest.java   |   1 +
 .../cloud/api/collections/ShardSplitTest.java   |  62 ++
 .../TestLocalFSCloudBackupRestore.java          |   2 +-
 .../AutoAddReplicasIntegrationTest.java         |  10 +-
 .../AutoAddReplicasPlanActionTest.java          |   7 +
 .../autoscaling/AutoScalingHandlerTest.java     |  13 +-
 .../autoscaling/ComputePlanActionTest.java      |  13 +-
 .../autoscaling/ExecutePlanActionTest.java      |   1 +
 .../MetricTriggerIntegrationTest.java           |   1 +
 .../ScheduledMaintenanceTriggerTest.java        |  47 +-
 .../ScheduledTriggerIntegrationTest.java        |   1 +
 .../sim/SimClusterStateProvider.java            |  41 +-
 .../sim/TestSimComputePlanAction.java           |   6 +-
 .../sim/TestSimExecutePlanAction.java           |   1 +
 .../autoscaling/sim/TestSimExtremeIndexing.java |   4 +
 .../sim/TestSimGenericDistributedQueue.java     |   1 +
 .../autoscaling/sim/TestSimLargeCluster.java    |   2 +
 .../autoscaling/sim/TestSimPolicyCloud.java     |   2 +-
 .../sim/TestSimTriggerIntegration.java          |   5 +
 .../solr/cloud/cdcr/CdcrBootstrapTest.java      |   1 +
 .../cloud/cdcr/CdcrOpsAndBoundariesTest.java    |   1 +
 .../cloud/cdcr/CdcrWithNodesRestartsTest.java   |   1 +
 .../cloud/hdfs/HdfsUnloadDistributedZkTest.java |   3 +-
 .../HdfsWriteToMultipleCollectionsTest.java     |   3 +-
 .../admin/MetricsHistoryHandlerTest.java        |   2 +-
 .../component/CustomHighlightComponentTest.java |   1 +
 .../handler/component/StatsComponentTest.java   |   2 +-
 ...estDistributedStatsComponentCardinality.java |   2 +-
 .../reporters/SolrJmxReporterCloudTest.java     |   3 +-
 .../search/TestRandomCollapseQParserPlugin.java |   2 +-
 .../search/facet/TestJsonFacetRefinement.java   |   2 +-
 .../solr/search/stats/TestDistribIDF.java       |   1 +
 .../TestDocTermOrdsUninvertLimit.java           |   1 +
 .../solr/uninverting/TestUninvertingReader.java |   6 +-
 .../TimeRoutedAliasUpdateProcessorTest.java     |   1 +
 solr/solr-ref-guide/src/cloud-screens.adoc      |   8 +-
 solr/solr-ref-guide/src/collections-api.adoc    |   4 +-
 .../src/images/cloud-screens/cloud-radial.png   | Bin 102878 -> 0 bytes
 .../src/migrate-to-policy-rule.adoc             | 170 ++++
 .../src/solrcloud-autoscaling-overview.adoc     |   2 +-
 .../src/solrcloud-autoscaling.adoc              |   3 +-
 .../solr-ref-guide/src/time-routed-aliases.adoc |   7 +-
 ...zookeeper-to-manage-configuration-files.adoc |  12 +-
 ...rking-with-external-files-and-processes.adoc |   2 +-
 .../tools/BuildNavAndPDFBody.java               |   2 +-
 .../apache/solr/client/solrj/SolrClient.java    |   9 +-
 .../cloud/autoscaling/AddReplicaSuggester.java  |   7 +-
 .../client/solrj/cloud/autoscaling/Clause.java  |  31 +-
 .../cloud/autoscaling/FreeDiskVariable.java     |  22 +-
 .../cloud/autoscaling/MoveReplicaSuggester.java |   4 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |   7 +-
 .../solrj/cloud/autoscaling/PolicyHelper.java   |   2 +-
 .../solrj/cloud/autoscaling/Suggester.java      |  39 +-
 .../solrj/cloud/autoscaling/Variable.java       |  20 +
 .../org/apache/solr/client/solrj/io/Lang.java   |   2 +
 .../client/solrj/io/eval/Log10Evaluator.java    |  50 ++
 .../client/solrj/io/eval/PairSortEvaluator.java |  93 +++
 .../client/solrj/io/eval/PowerEvaluator.java    |  55 +-
 .../solr/client/solrj/io/stream/LetStream.java  |  11 +-
 .../client/solrj/io/stream/SelectStream.java    |  11 +-
 .../client/solrj/io/stream/TopicStream.java     |   2 +-
 .../solrj/io/stream/expr/StreamFactory.java     |   5 +-
 .../request/MultiContentWriterRequest.java      | 124 +++
 .../client/solrj/request/RequestWriter.java     |   4 +
 .../common/params/CollectionAdminParams.java    |  16 +-
 .../src/resources/apispec/cluster.Commands.json |   9 +
 ...tAutoscalingPreferencesUsedWithNoPolicy.json |  53 ++
 .../testComputePlanAfterNodeAdded.json          |  16 +
 .../solr/autoscaling/testCoresSuggestions.json  |  17 +
 .../testCreateCollectionWithEmptyPolicy.json    |  20 +
 .../solr/autoscaling/testDiskSpaceHint.json     |  16 +
 .../solr/autoscaling/testEqualOnNonNode.json    |  83 ++
 .../solr/autoscaling/testFreeDiskDeviation.json |  35 +
 .../autoscaling/testFreeDiskSuggestions.json    |  27 +
 .../autoscaling/testFreediskPercentage.json     |  25 +
 .../autoscaling/testMoveReplicaSuggester.json   |  15 +
 .../solrj/solr/autoscaling/testPolicy.json      |  41 +
 .../solr/autoscaling/testPortSuggestions.json   |  22 +
 .../testReplicaCountSuggestions.json            |  15 +
 .../solr/autoscaling/testReplicaPercentage.json |  46 ++
 .../autoscaling/testReplicaZonesPercentage.json |  15 +
 .../autoscaling/testSyspropSuggestions1.json    |  24 +
 .../solr/autoscaling/testViolationOutput.json   |  22 +
 .../solr/autoscaling/testWithCollection.json    |  21 +
 .../testWithCollectionMoveReplica.json          |  28 +
 .../testWithCollectionMoveVsAddSuggestions.json |  49 ++
 .../testWithCollectionSuggestions.json          |  21 +
 .../ref_guide_examples/ZkConfigFilesTest.java   |  94 +++
 .../solr/client/solrj/SolrExampleTests.java     |  40 +-
 .../solrj/cloud/autoscaling/TestPolicy.java     | 771 +++----------------
 .../solrj/cloud/autoscaling/TestPolicy2.java    | 117 +--
 .../embedded/LargeVolumeBinaryJettyTest.java    |   2 +
 .../solrj/embedded/LargeVolumeJettyTest.java    |   2 +
 .../solrj/impl/CloudSolrClientBadInputTest.java |  73 ++
 .../client/solrj/impl/CloudSolrClientTest.java  |  35 +-
 .../ConcurrentUpdateSolrClientBadInputTest.java |  91 +++
 .../solrj/impl/HttpSolrClientBadInputTest.java  |  93 +++
 .../impl/LBHttpSolrClientBadInputTest.java      |  89 +++
 .../apache/solr/client/solrj/io/TestLang.java   |   2 +-
 .../solrj/io/stream/MathExpressionTest.java     | 130 ++++
 .../solrj/io/stream/StreamDecoratorTest.java    |   3 +-
 .../cloud/TestCollectionStateWatchers.java      |   8 +-
 .../solr/common/params/ShardParamsTest.java     |   3 +-
 solr/webapp/web/css/angular/menu.css            |   1 -
 solr/webapp/web/index.html                      |   1 -
 solr/webapp/web/js/angular/controllers/cloud.js |  70 +-
 solr/webapp/web/partials/cloud.html             |   2 +-
 228 files changed, 3631 insertions(+), 3593 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9a36e87f/solr/CHANGES.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9a36e87f/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index 0d1bbeb,004f4f7..8a58b6a
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@@ -858,22 -869,9 +853,9 @@@ public class DistributedUpdateProcesso
              String coreNodeName = ((Replica) stdNode.getNodeProps().getNodeProps()).getName();
              // if false, then the node is probably not "live" anymore
              // and we do not need to send a recovery message
 -            Throwable rootCause = SolrException.getRootCause(error.e);
 +            Throwable rootCause = SolrException.getRootCause(error.t);
-             if (!isOldLIRMode && zkController.getShardTerms(collection, shardId).registered(coreNodeName)) {
-               log.error("Setting up to try to start recovery on replica {} with url {} by increasing leader term", coreNodeName, replicaUrl, rootCause);
-               replicasShouldBeInLowerTerms.add(coreNodeName);
-             } else {
-               // The replica did not register its term, so it must run with the old LIR implementation
-               log.error("Setting up to try to start recovery on replica {}", replicaUrl, rootCause);
-               zkController.ensureReplicaInLeaderInitiatedRecovery(
-                   req.getCore().getCoreContainer(),
-                   collection,
-                   shardId,
-                   stdNode.getNodeProps(),
-                   req.getCore().getCoreDescriptor(),
-                   false /* forcePublishState */
-               );
-             }
+             log.error("Setting up to try to start recovery on replica {} with url {} by increasing leader term", coreNodeName, replicaUrl, rootCause);
+             replicasShouldBeInLowerTerms.add(coreNodeName);
            } catch (Exception exc) {
              Throwable setLirZnodeFailedCause = SolrException.getRootCause(exc);
              log.error("Leader failed to set replica " +

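For readers skimming the merge: the hunk above removes the old leader-initiated-recovery (LIR) branch and keeps only the shard-terms path, in which the leader collects failed replicas in replicasShouldBeInLowerTerms and then (outside this hunk) raises its own term so those replicas can see they are behind and start recovery. The following self-contained Java sketch only illustrates that idea; it is not the patch and not the real ZkShardTerms API, and every class and method name in it is a hypothetical stand-in.

// Illustrative sketch only -- not the actual Solr classes. It models the term-based
// recovery idea described above under that assumption; all names here are stand-ins.
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ShardTermsSketch {

  // Per-replica term, keyed by coreNodeName; a stand-in for the ZooKeeper-backed terms.
  private final Map<String, Long> terms = new ConcurrentHashMap<>();

  public void register(String coreNodeName) {
    terms.putIfAbsent(coreNodeName, 0L);
  }

  // Raise the leader's term above every replica that failed to receive an update,
  // so those replicas can detect that they are behind.
  public void ensureTermsIsHigher(String leaderName, Set<String> replicasNeedingRecovery) {
    long leaderTerm = terms.getOrDefault(leaderName, 0L);
    long highestLagging = replicasNeedingRecovery.stream()
        .mapToLong(name -> terms.getOrDefault(name, 0L))
        .max().orElse(0L);
    if (leaderTerm <= highestLagging) {
      terms.put(leaderName, highestLagging + 1);
    }
  }

  // A replica whose term is lower than the leader's must recover before serving again.
  public boolean needsRecovery(String leaderName, String replicaName) {
    return terms.getOrDefault(replicaName, 0L) < terms.getOrDefault(leaderName, 0L);
  }

  public static void main(String[] args) {
    ShardTermsSketch shardTerms = new ShardTermsSketch();
    shardTerms.register("leader_core");
    shardTerms.register("replica_core");

    // An update failed on replica_core: collect it, mirroring
    // replicasShouldBeInLowerTerms.add(coreNodeName) in the hunk above.
    Set<String> replicasShouldBeInLowerTerms = new HashSet<>();
    replicasShouldBeInLowerTerms.add("replica_core");

    shardTerms.ensureTermsIsHigher("leader_core", replicasShouldBeInLowerTerms);

    System.out.println(shardTerms.needsRecovery("leader_core", "replica_core")); // true
  }
}

Running main prints true: the replica whose update failed now has a lower term than the leader and is considered in need of recovery, which is the effect the simplified code path above sets up.
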
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9a36e87f/solr/solrj/src/java/org/apache/solr/client/solrj/SolrClient.java
----------------------------------------------------------------------


[28/50] [abbrv] lucene-solr:jira/http2: SOLR-12851: Update CHANGES.txt

Posted by da...@apache.org.
SOLR-12851: Update CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d48f22c1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d48f22c1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d48f22c1

Branch: refs/heads/jira/http2
Commit: d48f22c1ad5cedf20760a2b53bb7bba0693d3f75
Parents: a0bb501
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Oct 11 11:19:57 2018 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Oct 11 11:19:57 2018 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d48f22c1/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2db18c1..628abb8 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -186,6 +186,8 @@ Bug Fixes
 
 * SOLR-12729: SplitShardCmd should lock the parent shard to prevent parallel splitting requests. (ab)
 
+* SOLR-12851: Improvements and fixes to let and select Streaming Expressions (Joel Bernstein)
+
 Improvements
 ----------------------